2013-09-30 19:37:13 -07:00
|
|
|
/******************************************************************************
|
2014-03-07 05:55:21 -08:00
|
|
|
Copyright (C) 2013-2014 by Hugh Bailey <obs.jim@gmail.com>
|
2013-09-30 19:37:13 -07:00
|
|
|
|
|
|
|
This program is free software: you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
2013-12-02 21:24:38 -08:00
|
|
|
the Free Software Foundation, either version 2 of the License, or
|
2013-09-30 19:37:13 -07:00
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
******************************************************************************/
|
|
|
|
|
2014-08-24 17:32:44 -07:00
|
|
|
#include <inttypes.h>
|
2014-07-02 16:38:29 -07:00
|
|
|
#include "util/platform.h"
|
2020-03-21 02:55:12 -07:00
|
|
|
#include "util/util_uint64.h"
|
2022-06-25 16:19:48 -07:00
|
|
|
#include "graphics/math-extra.h"
|
2013-09-30 19:37:13 -07:00
|
|
|
#include "obs.h"
|
2014-01-26 17:48:14 -08:00
|
|
|
#include "obs-internal.h"
|
2013-09-30 19:37:13 -07:00
|
|
|
|
2016-11-17 05:25:23 -08:00
|
|
|
#include <caption/caption.h>
|
2018-06-28 11:22:44 -07:00
|
|
|
#include <caption/mpeg.h>
|
2016-11-17 05:25:23 -08:00
|
|
|
|
2022-02-02 21:56:30 -08:00
|
|
|
#define get_weak(output) ((obs_weak_output_t *)output->context.control)
|
|
|
|
|
2022-06-25 16:19:48 -07:00
|
|
|
#define RECONNECT_RETRY_MAX_MSEC (15 * 60 * 1000)
|
2022-06-26 00:28:00 -07:00
|
|
|
#define RECONNECT_RETRY_BASE_EXP 1.5f
|
2022-06-25 16:19:48 -07:00
|
|
|
|
2016-06-20 16:07:29 -07:00
|
|
|
static inline bool active(const struct obs_output *output)
|
|
|
|
{
|
|
|
|
return os_atomic_load_bool(&output->active);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool reconnecting(const struct obs_output *output)
|
|
|
|
{
|
|
|
|
return os_atomic_load_bool(&output->reconnecting);
|
|
|
|
}
|
|
|
|
|
2016-06-20 17:09:21 -07:00
|
|
|
static inline bool stopping(const struct obs_output *output)
|
|
|
|
{
|
|
|
|
return os_event_try(output->stopping_event) == EAGAIN;
|
|
|
|
}
|
|
|
|
|
2016-06-20 16:07:29 -07:00
|
|
|
static inline bool delay_active(const struct obs_output *output)
|
|
|
|
{
|
|
|
|
return os_atomic_load_bool(&output->delay_active);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool delay_capturing(const struct obs_output *output)
|
|
|
|
{
|
|
|
|
return os_atomic_load_bool(&output->delay_capturing);
|
|
|
|
}
|
|
|
|
|
2016-06-21 17:25:26 -07:00
|
|
|
static inline bool data_capture_ending(const struct obs_output *output)
|
|
|
|
{
|
|
|
|
return os_atomic_load_bool(&output->end_data_capture_thread_active);
|
|
|
|
}
|
|
|
|
|
2014-07-28 16:08:56 -07:00
|
|
|
const struct obs_output_info *find_output(const char *id)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
for (i = 0; i < obs->output_types.num; i++)
|
2013-12-20 16:23:19 -08:00
|
|
|
if (strcmp(obs->output_types.array[i].id, id) == 0)
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs->output_types.array + i;
|
2013-09-30 19:37:13 -07:00
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-08-04 08:41:15 -07:00
|
|
|
const char *obs_output_get_display_name(const char *id)
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
{
|
|
|
|
const struct obs_output_info *info = find_output(id);
|
2015-09-16 01:30:51 -07:00
|
|
|
return (info != NULL) ? info->get_name(info->type_data) : NULL;
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
}
|
|
|
|
|
2014-04-01 11:55:18 -07:00
|
|
|
/* Signal declarations registered on every output's signal handler.
 * Each entry is a calldata prototype string; all signals carry the
 * output pointer, and "stop" additionally carries the stop/error code.
 * The list is NULL-terminated for signal_handler_add_array(). */
static const char *output_signals[] = {
	"void start(ptr output)",
	"void stop(ptr output, int code)",
	"void pause(ptr output)",
	"void unpause(ptr output)",
	"void starting(ptr output)",
	"void stopping(ptr output)",
	"void activate(ptr output)",
	"void deactivate(ptr output)",
	"void reconnect(ptr output)",
	"void reconnect_success(ptr output)",
	NULL,
};
|
|
|
|
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
/* Initializes the output's shared context data (name, settings, hotkey
 * data, signal/proc handlers) and registers the standard output
 * signals.  Returns false if context initialization fails. */
static bool init_output_handlers(struct obs_output *output, const char *name,
				 obs_data_t *settings, obs_data_t *hotkey_data)
{
	const bool context_ok =
		obs_context_data_init(&output->context, OBS_OBJ_TYPE_OUTPUT,
				      settings, name, hotkey_data, false);
	if (!context_ok)
		return false;

	signal_handler_add_array(output->context.signals, output_signals);
	return true;
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Creates a new output of type 'id' with the given name, settings, and
 * hotkey data.  Returns the new output, or NULL on failure.  If the id
 * is not a registered output type, a "dummy" output is still created
 * (with owns_info_id set) so that settings are preserved; only
 * context.data remains NULL in that case. */
obs_output_t *obs_output_create(const char *id, const char *name,
				obs_data_t *settings, obs_data_t *hotkey_data)
{
	const struct obs_output_info *info = find_output(id);
	struct obs_output *output;
	int ret;

	output = bzalloc(sizeof(struct obs_output));

	/* Pre-mark all mutexes as uninitialized so the fail path can
	 * safely destroy only the ones that were actually created. */
	pthread_mutex_init_value(&output->interleaved_mutex);
	pthread_mutex_init_value(&output->delay_mutex);
	pthread_mutex_init_value(&output->caption_mutex);
	pthread_mutex_init_value(&output->pause.mutex);

	if (pthread_mutex_init(&output->interleaved_mutex, NULL) != 0)
		goto fail;
	if (pthread_mutex_init(&output->delay_mutex, NULL) != 0)
		goto fail;
	if (pthread_mutex_init(&output->caption_mutex, NULL) != 0)
		goto fail;
	if (pthread_mutex_init(&output->pause.mutex, NULL) != 0)
		goto fail;
	if (os_event_init(&output->stopping_event, OS_EVENT_TYPE_MANUAL) != 0)
		goto fail;
	if (!init_output_handlers(output, name, settings, hotkey_data))
		goto fail;

	/* Start in the "stopped" state: stopping_event signaled means
	 * no stop is currently pending. */
	os_event_signal(output->stopping_event);

	if (!info) {
		blog(LOG_ERROR, "Output ID '%s' not found", id);

		/* Unknown type: keep a private copy of the id string so
		 * the output can still be saved/identified. */
		output->info.id = bstrdup(id);
		output->owns_info_id = true;
	} else {
		output->info = *info;
	}
	output->video = obs_get_video();
	output->audio = obs_get_audio();
	if (output->info.get_defaults)
		output->info.get_defaults(output->context.settings);

	ret = os_event_init(&output->reconnect_stop_event,
			    OS_EVENT_TYPE_MANUAL);
	if (ret < 0)
		goto fail;

	/* Default reconnect policy: 2s base retry, up to 20 attempts,
	 * with a slightly randomized exponential backoff factor. */
	output->reconnect_retry_sec = 2;
	output->reconnect_retry_max = 20;
	output->reconnect_retry_exp =
		RECONNECT_RETRY_BASE_EXP + (rand_float(0) * 0.05f);
	output->valid = true;

	obs_context_init_control(&output->context, output,
				 (obs_destroy_cb)obs_output_destroy);
	obs_context_data_insert(&output->context, &obs->data.outputs_mutex,
				&obs->data.first_output);

	/* Only call the type's create callback for known output types;
	 * a NULL context.data is logged but not treated as fatal here. */
	if (info)
		output->context.data =
			info->create(output->context.settings, output);
	if (!output->context.data)
		blog(LOG_ERROR, "Failed to create output '%s'!", name);

	blog(LOG_DEBUG, "output '%s' (%s) created", name, id);
	return output;

fail:
	obs_output_destroy(output);
	return NULL;
}
|
|
|
|
|
2014-04-04 00:30:37 -07:00
|
|
|
static inline void free_packets(struct obs_output *output)
|
|
|
|
{
|
|
|
|
for (size_t i = 0; i < output->interleaved_packets.num; i++)
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_encoder_packet_release(output->interleaved_packets.array +
|
|
|
|
i);
|
2014-04-04 00:30:37 -07:00
|
|
|
da_free(output->interleaved_packets);
|
|
|
|
}
|
|
|
|
|
2019-07-06 20:07:37 -07:00
|
|
|
/* Frees every circular audio buffer, for all mixes and all planes. */
static inline void clear_audio_buffers(obs_output_t *output)
{
	for (size_t mix = 0; mix < MAX_AUDIO_MIXES; mix++)
		for (size_t plane = 0; plane < MAX_AV_PLANES; plane++)
			circlebuf_free(&output->audio_buffer[mix][plane]);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
void obs_output_destroy(obs_output_t *output)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
|
|
|
if (output) {
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
obs_context_data_remove(&output->context);
|
2014-03-10 13:10:35 -07:00
|
|
|
|
2016-08-05 15:36:10 -07:00
|
|
|
blog(LOG_DEBUG, "output '%s' destroyed", output->context.name);
|
2014-07-13 05:01:02 -07:00
|
|
|
|
2016-06-20 16:07:29 -07:00
|
|
|
if (output->valid && active(output))
|
2016-06-11 11:42:29 -07:00
|
|
|
obs_output_actual_stop(output, true, 0);
|
2014-02-07 02:03:54 -08:00
|
|
|
|
2016-06-21 04:06:35 -07:00
|
|
|
os_event_wait(output->stopping_event);
|
2016-06-21 17:25:26 -07:00
|
|
|
if (data_capture_ending(output))
|
|
|
|
pthread_join(output->end_data_capture_thread, NULL);
|
2014-04-04 00:30:37 -07:00
|
|
|
|
2016-06-21 04:06:35 -07:00
|
|
|
if (output->service)
|
|
|
|
output->service->output = NULL;
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
if (output->context.data)
|
|
|
|
output->info.destroy(output->context.data);
|
Add preliminary output/encoder interface
- First, I redid the output interface for libobs. I feel like it's
going in a pretty good direction in terms of design.
Right now, the design is so that outputs and encoders are separate.
One or more outputs can connect to a specific encoder to receive its
data, or the output can connect directly to raw data from libobs
output itself, if the output doesn't want to use a designated encoder.
Data is received via callbacks set when you connect to the encoder or
raw output. Multiple outputs can receive the data from a single
encoder context if need be (such as for streaming to multiple channels
at once, and/or recording with the same data).
When an encoder is first connected to, it will connect to raw output,
and start encoding. Additional connections will receive that same
data being encoded as well after that. When the last encoder has
disconnected, it will stop encoding. If for some reason the encoder
needs to stop, it will use the callback with NULL to signal that
encoding has stopped. Some of these things may be subject to change
in the future, though it feels pretty good with this design so far.
Will have to see how well it works out in practice versus theory.
- Second, Started adding preliminary RTMP/x264 output plugin code.
To speed things up, I might just make a direct raw->FFmpeg output to
create a quick output plugin that we can start using for testing all
the subsystems.
2014-01-16 21:34:51 -08:00
|
|
|
|
2016-06-21 04:06:35 -07:00
|
|
|
free_packets(output);
|
|
|
|
|
2014-05-13 12:53:13 -07:00
|
|
|
if (output->video_encoder) {
|
|
|
|
obs_encoder_remove_output(output->video_encoder,
|
2019-06-22 22:13:45 -07:00
|
|
|
output);
|
2014-05-13 12:53:13 -07:00
|
|
|
}
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
|
|
|
|
for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
|
|
|
|
if (output->audio_encoders[i]) {
|
|
|
|
obs_encoder_remove_output(
|
2019-06-22 22:13:45 -07:00
|
|
|
output->audio_encoders[i], output);
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
}
|
2014-05-13 12:53:13 -07:00
|
|
|
}
|
|
|
|
|
2019-07-06 20:07:37 -07:00
|
|
|
clear_audio_buffers(output);
|
|
|
|
|
2016-06-20 17:09:21 -07:00
|
|
|
os_event_destroy(output->stopping_event);
|
2019-07-07 12:27:13 -07:00
|
|
|
pthread_mutex_destroy(&output->pause.mutex);
|
2016-11-17 05:25:23 -08:00
|
|
|
pthread_mutex_destroy(&output->caption_mutex);
|
2014-04-04 00:30:37 -07:00
|
|
|
pthread_mutex_destroy(&output->interleaved_mutex);
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
pthread_mutex_destroy(&output->delay_mutex);
|
2014-07-02 16:38:29 -07:00
|
|
|
os_event_destroy(output->reconnect_stop_event);
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
obs_context_data_free(&output->context);
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
circlebuf_free(&output->delay_data);
|
2019-08-26 15:58:20 -07:00
|
|
|
circlebuf_free(&output->caption_data);
|
2015-09-13 11:55:06 -07:00
|
|
|
if (output->owns_info_id)
|
2019-06-22 22:13:45 -07:00
|
|
|
bfree((void *)output->info.id);
|
2017-05-15 03:04:11 -07:00
|
|
|
if (output->last_error_message)
|
|
|
|
bfree(output->last_error_message);
|
2013-09-30 19:37:13 -07:00
|
|
|
bfree(output);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
const char *obs_output_get_name(const obs_output_t *output)
|
2014-07-01 16:29:38 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_get_name")
|
|
|
|
? output->context.name
|
|
|
|
: NULL;
|
2014-07-01 16:29:38 -07:00
|
|
|
}
|
|
|
|
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
/* Performs the real (non-delayed) start of an output: waits for any
 * pending stop to complete, clears previous error/stop state, invokes
 * the output implementation's start callback, and snapshots frame
 * counters so that stop-time statistics can be computed relative to
 * this start.  Returns true if the implementation reported success. */
bool obs_output_actual_start(obs_output_t *output)
{
	bool started = false;

	/* Block until any in-progress stop has fully finished. */
	os_event_wait(output->stopping_event);
	output->stop_code = 0;

	/* Discard the error message left over from a previous session. */
	if (output->last_error_message) {
		bfree(output->last_error_message);
		output->last_error_message = NULL;
	}

	if (output->context.data)
		started = output->info.start(output->context.data);

	if (started && output->video) {
		/* Record baseline counters; log_frame_info subtracts these
		 * at stop time to report per-session totals. */
		output->starting_frame_count =
			video_output_get_total_frames(output->video);
		output->starting_drawn_count = obs->video.total_frames;
		output->starting_lagged_count = obs->video.lagged_frames;
	}

	/* Consume one delayed-restart reference, if any are pending. */
	if (os_atomic_load_long(&output->delay_restart_refs))
		os_atomic_dec_long(&output->delay_restart_refs);

	/* Reset caption state for the new session. */
	output->caption_timestamp = 0;

	circlebuf_free(&output->caption_data);
	circlebuf_init(&output->caption_data);

	return started;
}
|
|
|
|
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
/* Public entry point for starting an output.  Validates the output,
 * initializes its service when the OBS_OUTPUT_SERVICE flag is set,
 * and either defers to the delayed-start path (encoded output with a
 * configured delay) or starts immediately, emitting the "starting"
 * signal on success.  Returns true on success. */
bool obs_output_start(obs_output_t *output)
{
	bool encoded;
	bool has_service;

	if (!obs_output_valid(output, "obs_output_start"))
		return false;
	if (!output->context.data)
		return false;

	has_service = (output->info.flags & OBS_OUTPUT_SERVICE) != 0;
	if (has_service && !obs_service_initialize(output->service, output))
		return false;

	encoded = (output->info.flags & OBS_OUTPUT_ENCODED) != 0;

	/* Encoded outputs with a configured delay go through the
	 * delayed-start machinery instead of starting right away. */
	if (encoded && output->delay_sec)
		return obs_output_delay_start(output);

	if (!obs_output_actual_start(output))
		return false;

	do_output_signal(output, "starting");
	return true;
}
|
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
static inline bool data_active(struct obs_output *output)
|
|
|
|
{
|
|
|
|
return os_atomic_load_bool(&output->data_active);
|
|
|
|
}
|
|
|
|
|
2014-08-24 17:32:44 -07:00
|
|
|
static void log_frame_info(struct obs_output *output)
|
|
|
|
{
|
2016-01-25 03:58:51 -08:00
|
|
|
struct obs_core_video *video = &obs->video;
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
uint32_t drawn = video->total_frames - output->starting_drawn_count;
|
2017-08-02 13:59:24 -07:00
|
|
|
uint32_t lagged = video->lagged_frames - output->starting_lagged_count;
|
2016-01-25 03:58:51 -08:00
|
|
|
|
2014-10-12 19:22:04 -07:00
|
|
|
int dropped = obs_output_get_frames_dropped(output);
|
2017-08-02 14:25:06 -07:00
|
|
|
int total = output->total_frames;
|
2014-10-12 19:22:04 -07:00
|
|
|
|
2016-01-25 03:58:51 -08:00
|
|
|
double percentage_lagged = 0.0f;
|
|
|
|
double percentage_dropped = 0.0f;
|
|
|
|
|
|
|
|
if (drawn)
|
2019-06-22 22:13:45 -07:00
|
|
|
percentage_lagged = (double)lagged / (double)drawn * 100.0;
|
2017-08-02 14:25:06 -07:00
|
|
|
if (dropped)
|
|
|
|
percentage_dropped = (double)dropped / (double)total * 100.0;
|
2014-08-24 17:32:44 -07:00
|
|
|
|
|
|
|
blog(LOG_INFO, "Output '%s': stopping", output->context.name);
|
2017-08-02 14:25:06 -07:00
|
|
|
if (!dropped || !total)
|
|
|
|
blog(LOG_INFO, "Output '%s': Total frames output: %d",
|
2019-06-22 22:13:45 -07:00
|
|
|
output->context.name, total);
|
2017-08-02 14:25:06 -07:00
|
|
|
else
|
2019-06-22 22:13:45 -07:00
|
|
|
blog(LOG_INFO,
|
|
|
|
"Output '%s': Total frames output: %d"
|
|
|
|
" (%d attempted)",
|
|
|
|
output->context.name, total - dropped, total);
|
2017-08-02 14:25:06 -07:00
|
|
|
|
|
|
|
if (!lagged || !drawn)
|
2019-06-22 22:13:45 -07:00
|
|
|
blog(LOG_INFO, "Output '%s': Total drawn frames: %" PRIu32,
|
|
|
|
output->context.name, drawn);
|
2017-08-02 14:25:06 -07:00
|
|
|
else
|
2019-06-22 22:13:45 -07:00
|
|
|
blog(LOG_INFO,
|
|
|
|
"Output '%s': Total drawn frames: %" PRIu32 " (%" PRIu32
|
|
|
|
" attempted)",
|
|
|
|
output->context.name, drawn - lagged, drawn);
|
2014-08-24 17:32:44 -07:00
|
|
|
|
2016-01-25 03:58:51 -08:00
|
|
|
if (drawn && lagged)
|
2019-06-22 22:13:45 -07:00
|
|
|
blog(LOG_INFO,
|
|
|
|
"Output '%s': Number of lagged frames due "
|
|
|
|
"to rendering lag/stalls: %" PRIu32 " (%0.1f%%)",
|
|
|
|
output->context.name, lagged, percentage_lagged);
|
2017-08-02 13:59:24 -07:00
|
|
|
if (total && dropped)
|
2019-06-22 22:13:45 -07:00
|
|
|
blog(LOG_INFO,
|
|
|
|
"Output '%s': Number of dropped frames due "
|
|
|
|
"to insufficient bandwidth/connection stalls: "
|
|
|
|
"%d (%0.1f%%)",
|
|
|
|
output->context.name, dropped, percentage_dropped);
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
static inline void signal_stop(struct obs_output *output);
|
|
|
|
|
|
|
|
/* Performs the real stop of an output.
 *
 * 'force' forces the stop even if a stop is already in progress (and,
 * when a delayed session is active, tears the delay state down
 * immediately); 'ts' is the timestamp handed to the implementation's
 * stop callback.  Also unpauses the output, shuts down any reconnect
 * thread, and frees the pending caption list. */
void obs_output_actual_stop(obs_output_t *output, bool force, uint64_t ts)
{
	bool call_stop = true;
	bool was_reconnecting = false;

	/* A stop is already underway; only a forced stop proceeds. */
	if (stopping(output) && !force)
		return;

	obs_output_pause(output, false);

	os_event_reset(output->stopping_event);

	/* NOTE(review): 'was_reconnecting' excludes the delay-active case —
	 * presumably a delayed output handles its stop signal elsewhere. */
	was_reconnecting = reconnecting(output) && !delay_active(output);
	if (reconnecting(output)) {
		os_event_signal(output->reconnect_stop_event);
		if (output->reconnect_thread_active)
			pthread_join(output->reconnect_thread, NULL);
	}

	if (force) {
		if (delay_active(output)) {
			/* Only call the implementation's stop if it was
			 * actually capturing; end the data capture and mark
			 * the stop complete immediately. */
			call_stop = delay_capturing(output);
			os_atomic_set_bool(&output->delay_active, false);
			os_atomic_set_bool(&output->delay_capturing, false);
			output->stop_code = OBS_OUTPUT_SUCCESS;
			obs_output_end_data_capture(output);
			os_event_signal(output->stopping_event);
		} else {
			call_stop = true;
		}
	} else {
		call_stop = true;
	}

	if (output->context.data && call_stop) {
		output->info.stop(output->context.data, ts);

	} else if (was_reconnecting) {
		/* Nothing to stop in the implementation, but a reconnect
		 * was pending: report success and signal completion. */
		output->stop_code = OBS_OUTPUT_SUCCESS;
		signal_stop(output);
		os_event_signal(output->stopping_event);
	}

	/* Free the pending caption linked list. */
	while (output->caption_head) {
		output->caption_tail = output->caption_head->next;
		bfree(output->caption_head);
		output->caption_head = output->caption_tail;
	}
}
|
2014-08-24 17:32:44 -07:00
|
|
|
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
void obs_output_stop(obs_output_t *output)
|
|
|
|
{
|
|
|
|
bool encoded;
|
|
|
|
if (!obs_output_valid(output, "obs_output_stop"))
|
|
|
|
return;
|
2015-09-13 11:55:06 -07:00
|
|
|
if (!output->context.data)
|
|
|
|
return;
|
2016-06-11 11:42:29 -07:00
|
|
|
if (!active(output) && !reconnecting(output))
|
|
|
|
return;
|
2016-06-21 04:10:25 -07:00
|
|
|
if (reconnecting(output)) {
|
|
|
|
obs_output_force_stop(output);
|
|
|
|
return;
|
|
|
|
}
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
|
|
|
|
encoded = (output->info.flags & OBS_OUTPUT_ENCODED) != 0;
|
|
|
|
|
|
|
|
if (encoded && output->active_delay_ns) {
|
|
|
|
obs_output_delay_stop(output);
|
2016-06-11 11:42:29 -07:00
|
|
|
|
2016-06-20 17:09:21 -07:00
|
|
|
} else if (!stopping(output)) {
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
do_output_signal(output, "stopping");
|
2016-06-11 11:42:29 -07:00
|
|
|
obs_output_actual_stop(output, false, os_gettime_ns());
|
Implement RTMP module (still needs drop code)
- Implement the RTMP output module. This time around, we just use a
simple FLV muxer, then just write to the stream with RTMP_Write.
Easy and effective.
- Fix the FLV muxer, the muxer now outputs proper FLV packets.
- Output API:
* When using encoders, automatically interleave encoded packets
before sending it to the output.
* Pair encoders and have them automatically wait for the other to
start to ensure sync.
* Change 'obs_output_signal_start_fail' to 'obs_output_signal_stop'
because it was a bit confusing, and doing this makes a lot more
sense for outputs that need to stop suddenly (disconnections/etc).
- Encoder API:
* Remove some unnecessary encoder functions from the actual API and
make them internal. Most of the encoder functions are handled
automatically by outputs anyway, so there's no real need to expose
them and end up inadvertently confusing plugin writers.
* Have audio encoders wait for the video encoder to get a frame, then
start at the exact data point that the first video frame starts to
ensure the most accrate sync of video/audio possible.
* Add a required 'frame_size' callback for audio encoders that
returns the expected number of frames desired to encode with. This
way, the libobs encoder API can handle the circular buffering
internally automatically for the encoder modules, so encoder
writers don't have to do it themselves.
- Fix a few bugs in the serializer interface. It was passing the wrong
variable for the data in a few cases.
- If a source has video, make obs_source_update defer the actual update
callback until the tick function is called to prevent threading
issues.
2014-04-07 22:00:10 -07:00
|
|
|
}
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
/* Immediately halts the output, bypassing any configured stream delay.
 * Emits the "stopping" signal first if a stop was not already underway. */
void obs_output_force_stop(obs_output_t *output)
{
	if (!obs_output_valid(output, "obs_output_force_stop"))
		return;

	bool already_stopping = stopping(output);
	if (!already_stopping) {
		/* Fresh stop request: reset the stop code and notify
		 * listeners before tearing the output down. */
		output->stop_code = 0;
		do_output_signal(output, "stopping");
	}

	obs_output_actual_stop(output, true, 0);
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
/* Returns true while the output is running or attempting to reconnect. */
bool obs_output_active(const obs_output_t *output)
{
	if (output == NULL)
		return false;

	return active(output) || reconnecting(output);
}
|
|
|
|
|
2018-04-18 10:10:30 -07:00
|
|
|
uint32_t obs_output_get_flags(const obs_output_t *output)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_get_flags")
|
|
|
|
? output->info.flags
|
|
|
|
: 0;
|
2018-04-18 10:10:30 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
uint32_t obs_get_output_flags(const char *id)
|
|
|
|
{
|
|
|
|
const struct obs_output_info *info = find_output(id);
|
|
|
|
return info ? info->flags : 0;
|
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
static inline obs_data_t *get_defaults(const struct obs_output_info *info)
|
2014-04-04 00:30:37 -07:00
|
|
|
{
|
2014-09-25 17:44:05 -07:00
|
|
|
obs_data_t *settings = obs_data_create();
|
2014-08-04 21:27:52 -07:00
|
|
|
if (info->get_defaults)
|
|
|
|
info->get_defaults(settings);
|
2014-04-04 00:30:37 -07:00
|
|
|
return settings;
|
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Returns a new settings object with the default values for the output
 * type identified by 'id', or NULL if the id is unknown. */
obs_data_t *obs_output_defaults(const char *id)
{
	const struct obs_output_info *info = find_output(id);
	if (!info)
		return NULL;

	return get_defaults(info);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
obs_properties_t *obs_get_output_properties(const char *id)
|
2014-02-01 21:46:13 -08:00
|
|
|
{
|
Revamp API and start using doxygen
The API used to be designed in such a way to where it would expect
exports for each individual source/output/encoder/etc. You would export
functions for each and it would automatically load those functions based
on a specific naming scheme from the module.
The idea behind this was that I wanted to limit the usage of structures
in the API so only functions could be used. It was an interesting idea
in theory, but this idea turned out to be flawed in a number of ways:
1.) Requiring exports to create sources/outputs/encoders/etc meant that
you could not create them by any other means, which meant that
things like faruton's .net plugin would become difficult.
2.) Export function declarations could not be checked, therefore if you
created a function with the wrong parameters and parameter types,
the compiler wouldn't know how to check for that.
3.) Required overly complex load functions in libobs just to handle it.
It makes much more sense to just have a load function that you call
manually. Complexity is the bane of all good programs.
4.) It required that you have functions of specific names, which looked
and felt somewhat unsightly.
So, to fix these issues, I replaced it with a more commonly used API
scheme, seen commonly in places like kernels and typical C libraries
with abstraction. You simply create a structure that contains the
callback definitions, and you pass it to a function to register that
definition (such as obs_register_source), which you call in the
obs_module_load of the module.
It will also automatically check the structure size and ensure that it
only loads the required values if the structure happened to add new
values in an API change.
The "main" source file for each module must include obs-module.h, and
must use OBS_DECLARE_MODULE() within that source file.
Also, started writing some doxygen documentation in to the main library
headers. Will add more detailed documentation as I go.
2014-02-12 07:04:50 -08:00
|
|
|
const struct obs_output_info *info = find_output(id);
|
2014-08-04 21:27:52 -07:00
|
|
|
if (info && info->get_properties) {
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_data_t *defaults = get_defaults(info);
|
2014-09-25 17:44:05 -07:00
|
|
|
obs_properties_t *properties;
|
2014-04-04 00:30:37 -07:00
|
|
|
|
2014-09-29 08:36:13 -07:00
|
|
|
properties = info->get_properties(NULL);
|
2014-04-04 00:30:37 -07:00
|
|
|
obs_properties_apply_settings(properties, defaults);
|
|
|
|
obs_data_release(defaults);
|
|
|
|
return properties;
|
|
|
|
}
|
2014-02-01 21:46:13 -08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
obs_properties_t *obs_output_properties(const obs_output_t *output)
|
2014-03-23 01:07:54 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_properties"))
|
|
|
|
return NULL;
|
|
|
|
|
2014-08-04 21:27:52 -07:00
|
|
|
if (output && output->info.get_properties) {
|
2014-09-25 17:44:05 -07:00
|
|
|
obs_properties_t *props;
|
2014-09-29 08:36:13 -07:00
|
|
|
props = output->info.get_properties(output->context.data);
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
obs_properties_apply_settings(props, output->context.settings);
|
2014-04-04 00:30:37 -07:00
|
|
|
return props;
|
|
|
|
}
|
|
|
|
|
2014-03-23 01:07:54 -07:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Merges 'settings' into the output's stored settings, then forwards the
 * merged result to the output implementation's update callback (if any). */
void obs_output_update(obs_output_t *output, obs_data_t *settings)
{
	if (!obs_output_valid(output, "obs_output_update"))
		return;

	/* Apply new values on top of the existing settings rather than
	 * replacing them wholesale. */
	obs_data_apply(output->context.settings, settings);

	if (output->info.update != NULL)
		output->info.update(output->context.data,
				    output->context.settings);
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
obs_data_t *obs_output_get_settings(const obs_output_t *output)
|
2014-02-10 09:22:35 -08:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_get_settings"))
|
2014-02-10 09:22:35 -08:00
|
|
|
return NULL;
|
|
|
|
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
obs_data_addref(output->context.settings);
|
|
|
|
return output->context.settings;
|
2014-02-10 09:22:35 -08:00
|
|
|
}
|
|
|
|
|
2015-10-21 07:32:01 -07:00
|
|
|
bool obs_output_can_pause(const obs_output_t *output)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_can_pause")
|
2019-07-07 12:27:13 -07:00
|
|
|
? !!(output->info.flags & OBS_OUTPUT_CAN_PAUSE)
|
2019-06-22 22:13:45 -07:00
|
|
|
: false;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2019-07-07 12:27:13 -07:00
|
|
|
static inline void end_pause(struct pause_data *pause, uint64_t ts)
|
|
|
|
{
|
|
|
|
if (!pause->ts_end) {
|
|
|
|
pause->ts_end = ts;
|
|
|
|
pause->ts_offset += pause->ts_end - pause->ts_start;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Projects the current wall-clock time onto the video frame timestamp
 * grid anchored at pause->last_video_ts: the result is last_video_ts
 * plus a whole number of frame intervals.  The "+ i2" (two intervals)
 * biases the chosen boundary forward — presumably so it lands safely
 * after any frame already in flight; TODO(review) confirm intent. */
static inline uint64_t get_closest_v_ts(struct pause_data *pause)
{
	uint64_t interval = obs->video.video_frame_interval_ns;
	uint64_t i2 = interval * 2;
	uint64_t ts = os_gettime_ns();

	/* Integer division truncates, so this rounds (ts + 2*interval)
	 * down to the nearest frame boundary relative to last_video_ts. */
	return pause->last_video_ts +
	       ((ts - pause->last_video_ts + i2) / interval) * interval;
}
|
|
|
|
|
2019-08-31 00:59:48 -07:00
|
|
|
static inline bool pause_can_start(struct pause_data *pause)
|
|
|
|
{
|
|
|
|
return !pause->ts_start && !pause->ts_end;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool pause_can_stop(struct pause_data *pause)
|
|
|
|
{
|
|
|
|
return !!pause->ts_start && !pause->ts_end;
|
|
|
|
}
|
|
|
|
|
2019-07-07 12:27:13 -07:00
|
|
|
/* Pauses or unpauses an encoded output by pausing its video encoder and
 * every attached audio encoder at the same frame-aligned timestamp.
 * Returns false if any encoder is not in a state that permits the
 * requested transition (in which case nothing is changed).
 *
 * Lock ordering: video encoder's pause mutex first, then each audio
 * encoder's in mix order; released in strict reverse order below. */
static bool obs_encoded_output_pause(obs_output_t *output, bool pause)
{
	obs_encoder_t *venc;
	obs_encoder_t *aenc[MAX_AUDIO_MIXES];
	uint64_t closest_v_ts;
	bool success = false;

	/* Snapshot the encoder pointers before taking any locks. */
	venc = output->video_encoder;
	for (size_t i = 0; i < MAX_AUDIO_MIXES; i++)
		aenc[i] = output->audio_encoders[i];

	pthread_mutex_lock(&venc->pause.mutex);
	for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
		if (aenc[i]) {
			pthread_mutex_lock(&aenc[i]->pause.mutex);
		}
	}

	/* ---------------------------- */

	/* Single frame-aligned timestamp shared by all encoders so that
	 * video and audio pause/resume at the same point. */
	closest_v_ts = get_closest_v_ts(&venc->pause);

	if (pause) {
		/* Verify *all* encoders can start a pause before mutating
		 * any of them, so the operation is all-or-nothing. */
		if (!pause_can_start(&venc->pause)) {
			goto fail;
		}
		for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
			if (aenc[i] && !pause_can_start(&aenc[i]->pause)) {
				goto fail;
			}
		}

		os_atomic_set_bool(&venc->paused, true);
		venc->pause.ts_start = closest_v_ts;

		for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
			if (aenc[i]) {
				os_atomic_set_bool(&aenc[i]->paused, true);
				aenc[i]->pause.ts_start = closest_v_ts;
			}
		}
	} else {
		/* Same all-or-nothing validation for ending the pause. */
		if (!pause_can_stop(&venc->pause)) {
			goto fail;
		}
		for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
			if (aenc[i] && !pause_can_stop(&aenc[i]->pause)) {
				goto fail;
			}
		}

		os_atomic_set_bool(&venc->paused, false);
		end_pause(&venc->pause, closest_v_ts);

		for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
			if (aenc[i]) {
				os_atomic_set_bool(&aenc[i]->paused, false);
				end_pause(&aenc[i]->pause, closest_v_ts);
			}
		}
	}

	/* ---------------------------- */

	success = true;

fail:
	/* Unlock in reverse acquisition order (audio mixes backwards,
	 * then the video encoder). */
	for (size_t i = MAX_AUDIO_MIXES; i > 0; i--) {
		if (aenc[i - 1]) {
			pthread_mutex_unlock(&aenc[i - 1]->pause.mutex);
		}
	}
	pthread_mutex_unlock(&venc->pause.mutex);

	return success;
}
|
|
|
|
|
|
|
|
/* Pauses or unpauses a raw (non-encoded) output.  The pause state lives
 * directly on the output and is guarded by its pause mutex.  Returns
 * false if the output is not in a state permitting the transition. */
static bool obs_raw_output_pause(obs_output_t *output, bool pause)
{
	bool ok;
	uint64_t boundary_ts;

	pthread_mutex_lock(&output->pause.mutex);

	boundary_ts = get_closest_v_ts(&output->pause);

	if (pause) {
		ok = pause_can_start(&output->pause);
		if (ok)
			output->pause.ts_start = boundary_ts;
	} else {
		ok = pause_can_stop(&output->pause);
		if (ok)
			end_pause(&output->pause, boundary_ts);
	}

	pthread_mutex_unlock(&output->pause.mutex);
	return ok;
}
|
|
|
|
|
|
|
|
/* Requests a pause (true) or resume (false) of the output.  Succeeds
 * only for active outputs whose type supports pausing; requesting the
 * state the output is already in counts as success.  Signals
 * "pause"/"unpause" and logs on an actual state change. */
bool obs_output_pause(obs_output_t *output, bool pause)
{
	if (!obs_output_valid(output, "obs_output_pause"))
		return false;
	if ((output->info.flags & OBS_OUTPUT_CAN_PAUSE) == 0)
		return false;
	if (!os_atomic_load_bool(&output->active))
		return false;

	/* Already in the requested state: nothing to do. */
	if (os_atomic_load_bool(&output->paused) == pause)
		return true;

	bool encoded = (output->info.flags & OBS_OUTPUT_ENCODED) != 0;
	bool success = encoded ? obs_encoded_output_pause(output, pause)
			       : obs_raw_output_pause(output, pause);

	if (success) {
		os_atomic_set_bool(&output->paused, pause);
		do_output_signal(output, pause ? "pause" : "unpause");

		blog(LOG_INFO, "output %s %spaused", output->context.name,
		     pause ? "" : "un");
	}

	return success;
}
|
|
|
|
|
|
|
|
bool obs_output_paused(const obs_output_t *output)
|
|
|
|
{
|
|
|
|
return obs_output_valid(output, "obs_output_paused")
|
|
|
|
? os_atomic_load_bool(&output->paused)
|
|
|
|
: false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Returns the total time (ns) this output has spent paused, read under
 * the pause mutex.  0 if the output is invalid. */
uint64_t obs_output_get_pause_offset(obs_output_t *output)
{
	if (!obs_output_valid(output, "obs_output_get_pause_offset"))
		return 0;

	pthread_mutex_lock(&output->pause.mutex);
	uint64_t offset = output->pause.ts_offset;
	pthread_mutex_unlock(&output->pause.mutex);

	return offset;
}
|
2014-03-10 13:10:35 -07:00
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
signal_handler_t *obs_output_get_signal_handler(const obs_output_t *output)
|
2014-03-10 13:10:35 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_get_signal_handler")
|
|
|
|
? output->context.signals
|
|
|
|
: NULL;
|
2014-03-10 13:10:35 -07:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
proc_handler_t *obs_output_get_proc_handler(const obs_output_t *output)
|
2014-03-10 13:10:35 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_get_proc_handler")
|
|
|
|
? output->context.procs
|
|
|
|
: NULL;
|
2014-03-10 13:10:35 -07:00
|
|
|
}
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Assigns the raw video/audio streams this output consumes.  Either
 * pointer may be NULL; they are stored as-is without reference counting. */
void obs_output_set_media(obs_output_t *output, video_t *video, audio_t *audio)
{
	if (!obs_output_valid(output, "obs_output_set_media"))
		return;

	output->video = video;
	output->audio = audio;
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
video_t *obs_output_video(const obs_output_t *output)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_video") ? output->video
|
|
|
|
: NULL;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
audio_t *obs_output_audio(const obs_output_t *output)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_audio") ? output->audio
|
|
|
|
: NULL;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
2018-10-04 20:04:40 -07:00
|
|
|
static inline size_t get_first_mixer(const obs_output_t *output)
|
|
|
|
{
|
|
|
|
for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
|
|
|
|
if ((((size_t)1 << i) & output->mixer_mask) != 0) {
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
/* Assigns a single audio mixer to the output by index.  The request is
 * silently ignored while the output is active. */
void obs_output_set_mixer(obs_output_t *output, size_t mixer_idx)
{
	if (!obs_output_valid(output, "obs_output_set_mixer"))
		return;

	/* the mixer assignment cannot change while the output is running */
	if (active(output))
		return;

	output->mixer_mask = (size_t)1 << mixer_idx;
}
|
|
|
|
|
|
|
|
/* Returns the index of the first (lowest) mixer assigned to the output,
 * or 0 if the output pointer is invalid. */
size_t obs_output_get_mixer(const obs_output_t *output)
{
	return obs_output_valid(output, "obs_output_get_mixer")
		       ? get_first_mixer(output)
		       : 0;
}
|
|
|
|
|
|
|
|
/* Replaces the output's full mixer bitmask with the given value. */
void obs_output_set_mixers(obs_output_t *output, size_t mixers)
{
	if (obs_output_valid(output, "obs_output_set_mixers"))
		output->mixer_mask = mixers;
}
|
|
|
|
|
|
|
|
size_t obs_output_get_mixers(const obs_output_t *output)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_get_mixers")
|
|
|
|
? output->mixer_mask
|
|
|
|
: 0;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
}
|
|
|
|
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
void obs_output_remove_encoder(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
struct obs_encoder *encoder)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_remove_encoder"))
|
|
|
|
return;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
if (output->video_encoder == encoder) {
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
output->video_encoder = NULL;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
} else {
|
|
|
|
for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
|
|
|
|
if (output->audio_encoders[i] == encoder)
|
|
|
|
output->audio_encoders[i] = NULL;
|
|
|
|
}
|
|
|
|
}
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
void obs_output_set_video_encoder(obs_output_t *output, obs_encoder_t *encoder)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_set_video_encoder"))
|
|
|
|
return;
|
|
|
|
if (encoder && encoder->info.type != OBS_ENCODER_VIDEO) {
|
|
|
|
blog(LOG_WARNING, "obs_output_set_video_encoder: "
|
2019-06-22 22:13:45 -07:00
|
|
|
"encoder passed is not a video encoder");
|
2015-10-17 02:51:13 -07:00
|
|
|
return;
|
|
|
|
}
|
2020-09-23 14:47:30 -07:00
|
|
|
if (active(output)) {
|
|
|
|
blog(LOG_WARNING,
|
|
|
|
"%s: tried to set video encoder on output \"%s\" "
|
|
|
|
"while the output is still active!",
|
|
|
|
__FUNCTION__, output->context.name);
|
|
|
|
return;
|
|
|
|
}
|
2015-10-17 02:51:13 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
if (output->video_encoder == encoder)
|
|
|
|
return;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
2014-10-22 19:42:09 -07:00
|
|
|
obs_encoder_remove_output(output->video_encoder, output);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
obs_encoder_add_output(encoder, output);
|
|
|
|
output->video_encoder = encoder;
|
2014-08-10 16:50:44 -07:00
|
|
|
|
|
|
|
/* set the preferred resolution on the encoder */
|
|
|
|
if (output->scaled_width && output->scaled_height)
|
|
|
|
obs_encoder_set_scaled_size(output->video_encoder,
|
2019-06-22 22:13:45 -07:00
|
|
|
output->scaled_width,
|
|
|
|
output->scaled_height);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
void obs_output_set_audio_encoder(obs_output_t *output, obs_encoder_t *encoder,
|
2019-06-22 22:13:45 -07:00
|
|
|
size_t idx)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_set_audio_encoder"))
|
|
|
|
return;
|
|
|
|
if (encoder && encoder->info.type != OBS_ENCODER_AUDIO) {
|
|
|
|
blog(LOG_WARNING, "obs_output_set_audio_encoder: "
|
2019-06-22 22:13:45 -07:00
|
|
|
"encoder passed is not an audio encoder");
|
2015-10-17 02:51:13 -07:00
|
|
|
return;
|
|
|
|
}
|
2020-09-23 14:47:30 -07:00
|
|
|
if (active(output)) {
|
|
|
|
blog(LOG_WARNING,
|
|
|
|
"%s: tried to set audio encoder %d on output \"%s\" "
|
|
|
|
"while the output is still active!",
|
|
|
|
__FUNCTION__, (int)idx, output->context.name);
|
|
|
|
return;
|
|
|
|
}
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
if ((output->info.flags & OBS_OUTPUT_MULTI_TRACK) != 0) {
|
|
|
|
if (idx >= MAX_AUDIO_MIXES) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (idx > 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
if (output->audio_encoders[idx] == encoder)
|
|
|
|
return;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
|
|
|
|
obs_encoder_remove_output(output->audio_encoders[idx], output);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
obs_encoder_add_output(encoder, output);
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
output->audio_encoders[idx] = encoder;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
obs_encoder_t *obs_output_get_video_encoder(const obs_output_t *output)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_get_video_encoder")
|
|
|
|
? output->video_encoder
|
|
|
|
: NULL;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
obs_encoder_t *obs_output_get_audio_encoder(const obs_output_t *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
size_t idx)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_get_audio_encoder"))
|
|
|
|
return NULL;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
|
|
|
|
if ((output->info.flags & OBS_OUTPUT_MULTI_TRACK) != 0) {
|
|
|
|
if (idx >= MAX_AUDIO_MIXES) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (idx > 0) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return output->audio_encoders[idx];
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Binds a service to the output.  The call is ignored while the output
 * is active, when no service is given, or while the service itself is
 * active.  A service can only belong to one output at a time, so any
 * previous binding of 'service' is severed first. */
void obs_output_set_service(obs_output_t *output, obs_service_t *service)
{
	if (!obs_output_valid(output, "obs_output_set_service"))
		return;
	if (active(output) || !service || service->active)
		return;

	/* Detach the service from whichever output currently holds it. */
	if (service->output)
		service->output->service = NULL;

	output->service = service;
	service->output = output;
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
obs_service_t *obs_output_get_service(const obs_output_t *output)
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_get_service")
|
|
|
|
? output->service
|
|
|
|
: NULL;
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
/* Configures automatic-reconnect behavior: 'retry_count' is the maximum
 * number of reconnect attempts, 'retry_sec' the base delay between
 * attempts in seconds. */
void obs_output_set_reconnect_settings(obs_output_t *output, int retry_count,
				       int retry_sec)
{
	if (!obs_output_valid(output, "obs_output_set_reconnect_settings"))
		return;

	output->reconnect_retry_sec = retry_sec;
	output->reconnect_retry_max = retry_count;
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
uint64_t obs_output_get_total_bytes(const obs_output_t *output)
|
2014-07-06 14:55:56 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_get_total_bytes"))
|
|
|
|
return 0;
|
|
|
|
if (!output->info.get_total_bytes)
|
2014-07-06 14:55:56 -07:00
|
|
|
return 0;
|
|
|
|
|
2016-06-20 16:07:29 -07:00
|
|
|
if (delay_active(output) && !delay_capturing(output))
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
return 0;
|
|
|
|
|
2014-08-04 21:27:52 -07:00
|
|
|
return output->info.get_total_bytes(output->context.data);
|
2014-07-06 14:55:56 -07:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
int obs_output_get_frames_dropped(const obs_output_t *output)
|
2014-07-06 14:55:56 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_get_frames_dropped"))
|
|
|
|
return 0;
|
|
|
|
if (!output->info.get_dropped_frames)
|
2014-07-06 14:55:56 -07:00
|
|
|
return 0;
|
|
|
|
|
2014-08-04 21:27:52 -07:00
|
|
|
return output->info.get_dropped_frames(output->context.data);
|
2014-07-06 14:55:56 -07:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
int obs_output_get_total_frames(const obs_output_t *output)
|
2014-07-06 14:55:56 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_get_total_frames")
|
|
|
|
? output->total_frames
|
|
|
|
: 0;
|
2014-07-06 14:55:56 -07:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Sets the preferred (scaled) output resolution.  Only meaningful for
 * outputs with the OBS_OUTPUT_VIDEO flag, and refused (with a warning)
 * while the output is active.  For encoded outputs the size is also
 * propagated to the assigned video encoder. */
void obs_output_set_preferred_size(obs_output_t *output, uint32_t width,
				   uint32_t height)
{
	if (!obs_output_valid(output, "obs_output_set_preferred_size"))
		return;
	if ((output->info.flags & OBS_OUTPUT_VIDEO) == 0)
		return;

	if (active(output)) {
		blog(LOG_WARNING,
		     "output '%s': Cannot set the preferred "
		     "resolution while the output is active",
		     obs_output_get_name(output));
		return;
	}

	output->scaled_width = width;
	output->scaled_height = height;

	/* Encoded outputs delegate scaling to the video encoder. */
	const bool encoded = (output->info.flags & OBS_OUTPUT_ENCODED) != 0;
	if (encoded && output->video_encoder)
		obs_encoder_set_scaled_size(output->video_encoder, width,
					    height);
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
uint32_t obs_output_get_width(const obs_output_t *output)
|
2014-08-10 16:50:44 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_get_width"))
|
|
|
|
return 0;
|
|
|
|
if ((output->info.flags & OBS_OUTPUT_VIDEO) == 0)
|
2014-08-10 16:50:44 -07:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (output->info.flags & OBS_OUTPUT_ENCODED)
|
|
|
|
return obs_encoder_get_width(output->video_encoder);
|
|
|
|
else
|
2019-06-22 22:13:45 -07:00
|
|
|
return output->scaled_width != 0
|
|
|
|
? output->scaled_width
|
|
|
|
: video_output_get_width(output->video);
|
2014-08-10 16:50:44 -07:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
uint32_t obs_output_get_height(const obs_output_t *output)
|
2014-08-10 16:50:44 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_get_height"))
|
|
|
|
return 0;
|
|
|
|
if ((output->info.flags & OBS_OUTPUT_VIDEO) == 0)
|
2014-08-10 16:50:44 -07:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (output->info.flags & OBS_OUTPUT_ENCODED)
|
|
|
|
return obs_encoder_get_height(output->video_encoder);
|
|
|
|
else
|
2019-06-22 22:13:45 -07:00
|
|
|
return output->scaled_height != 0
|
|
|
|
? output->scaled_height
|
|
|
|
: video_output_get_height(output->video);
|
2014-08-10 16:50:44 -07:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Explicitly sets the raw video conversion (format/size/colorspace) that the
 * output wants when capturing non-encoded video.  Marking the conversion as
 * "set" makes it take precedence over any automatic scaling conversion. */
void obs_output_set_video_conversion(obs_output_t *output,
				     const struct video_scale_info *conversion)
{
	/* both pointers must be valid; the helpers log on failure */
	if (!obs_output_valid(output, "obs_output_set_video_conversion") ||
	    !obs_ptr_valid(conversion, "obs_output_set_video_conversion"))
		return;

	output->video_conversion = *conversion;
	output->video_conversion_set = true;
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
void obs_output_set_audio_conversion(
|
|
|
|
obs_output_t *output, const struct audio_convert_info *conversion)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_set_audio_conversion"))
|
|
|
|
return;
|
|
|
|
if (!obs_ptr_valid(conversion, "obs_output_set_audio_conversion"))
|
|
|
|
return;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
|
|
|
output->audio_conversion = *conversion;
|
|
|
|
output->audio_conversion_set = true;
|
|
|
|
}
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
static inline size_t num_audio_mixes(const struct obs_output *output)
|
|
|
|
{
|
|
|
|
size_t mix_count = 1;
|
|
|
|
|
|
|
|
if ((output->info.flags & OBS_OUTPUT_MULTI_TRACK) != 0) {
|
|
|
|
mix_count = 0;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
|
|
|
|
if (!output->audio_encoders[i])
|
|
|
|
break;
|
|
|
|
|
|
|
|
mix_count++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return mix_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool audio_valid(const struct obs_output *output, bool encoded)
|
|
|
|
{
|
|
|
|
if (encoded) {
|
|
|
|
size_t mix_count = num_audio_mixes(output);
|
|
|
|
if (!mix_count)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < mix_count; i++) {
|
|
|
|
if (!output->audio_encoders[i]) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (!output->audio)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
static bool can_begin_data_capture(const struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
bool encoded, bool has_video, bool has_audio,
|
|
|
|
bool has_service)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
|
|
|
if (has_video) {
|
|
|
|
if (encoded) {
|
|
|
|
if (!output->video_encoder)
|
|
|
|
return false;
|
|
|
|
} else {
|
|
|
|
if (!output->video)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (has_audio) {
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
if (!audio_valid(output, encoded)) {
|
|
|
|
return false;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
if (has_service && !output->service)
|
|
|
|
return false;
|
|
|
|
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
static inline bool has_scaling(const struct obs_output *output)
|
2014-08-10 16:50:44 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
uint32_t video_width = video_output_get_width(output->video);
|
2014-08-10 16:50:44 -07:00
|
|
|
uint32_t video_height = video_output_get_height(output->video);
|
|
|
|
|
|
|
|
return output->scaled_width && output->scaled_height &&
|
2019-06-22 22:13:45 -07:00
|
|
|
(video_width != output->scaled_width ||
|
|
|
|
video_height != output->scaled_height);
|
2014-08-10 16:50:44 -07:00
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static inline struct video_scale_info *
|
|
|
|
get_video_conversion(struct obs_output *output)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
2014-08-10 16:50:44 -07:00
|
|
|
if (output->video_conversion_set) {
|
|
|
|
if (!output->video_conversion.width)
|
|
|
|
output->video_conversion.width =
|
|
|
|
obs_output_get_width(output);
|
|
|
|
|
|
|
|
if (!output->video_conversion.height)
|
|
|
|
output->video_conversion.height =
|
|
|
|
obs_output_get_height(output);
|
|
|
|
|
|
|
|
return &output->video_conversion;
|
|
|
|
|
|
|
|
} else if (has_scaling(output)) {
|
|
|
|
const struct video_output_info *info =
|
|
|
|
video_output_get_info(output->video);
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
output->video_conversion.format = info->format;
|
2014-08-10 16:50:44 -07:00
|
|
|
output->video_conversion.colorspace = VIDEO_CS_DEFAULT;
|
2019-06-22 22:13:45 -07:00
|
|
|
output->video_conversion.range = VIDEO_RANGE_DEFAULT;
|
|
|
|
output->video_conversion.width = output->scaled_width;
|
|
|
|
output->video_conversion.height = output->scaled_height;
|
2014-08-10 16:50:44 -07:00
|
|
|
return &output->video_conversion;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static inline struct audio_convert_info *
|
|
|
|
get_audio_conversion(struct obs_output *output)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
|
|
|
return output->audio_conversion_set ? &output->audio_conversion : NULL;
|
|
|
|
}
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
static size_t get_track_index(const struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
struct encoder_packet *pkt)
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
{
|
|
|
|
for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
|
|
|
|
struct obs_encoder *encoder = output->audio_encoders[i];
|
|
|
|
|
|
|
|
if (pkt->encoder == encoder)
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(false);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-02-10 21:47:34 -08:00
|
|
|
static inline void check_received(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
struct encoder_packet *out)
|
2014-04-04 23:21:19 -07:00
|
|
|
{
|
2014-10-22 15:16:37 -07:00
|
|
|
if (out->type == OBS_ENCODER_VIDEO) {
|
|
|
|
if (!output->received_video)
|
2014-04-04 23:21:19 -07:00
|
|
|
output->received_video = true;
|
2014-10-22 15:16:37 -07:00
|
|
|
} else {
|
|
|
|
if (!output->received_audio)
|
2014-04-04 23:21:19 -07:00
|
|
|
output->received_audio = true;
|
|
|
|
}
|
2015-02-10 21:47:34 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void apply_interleaved_packet_offset(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
struct encoder_packet *out)
|
2015-02-10 21:47:34 -08:00
|
|
|
{
|
|
|
|
int64_t offset;
|
|
|
|
|
|
|
|
/* audio and video need to start at timestamp 0, and the encoders
|
|
|
|
* may not currently be at 0 when we get data. so, we store the
|
|
|
|
* current dts as offset and subtract that value from the dts/pts
|
|
|
|
* of the output packet. */
|
2019-06-22 22:13:45 -07:00
|
|
|
offset = (out->type == OBS_ENCODER_VIDEO)
|
|
|
|
? output->video_offset
|
|
|
|
: output->audio_offsets[out->track_idx];
|
2014-04-04 23:21:19 -07:00
|
|
|
|
2014-04-10 11:59:42 -07:00
|
|
|
out->dts -= offset;
|
|
|
|
out->pts -= offset;
|
|
|
|
|
|
|
|
/* convert the newly adjusted dts to relative dts time to ensure proper
|
|
|
|
* interleaving. if we're using an audio encoder that's already been
|
|
|
|
* started on another output, then the first audio packet may not be
|
|
|
|
* quite perfectly synced up in terms of system time (and there's
|
|
|
|
* nothing we can really do about that), but it will always at least be
|
|
|
|
* within a 23ish millisecond threshold (at least for AAC) */
|
|
|
|
out->dts_usec = packet_dts_usec(out);
|
2014-04-04 23:21:19 -07:00
|
|
|
}
|
|
|
|
|
2014-04-26 23:29:40 -07:00
|
|
|
static inline bool has_higher_opposing_ts(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
struct encoder_packet *packet)
|
2014-04-26 23:29:40 -07:00
|
|
|
{
|
|
|
|
if (packet->type == OBS_ENCODER_VIDEO)
|
|
|
|
return output->highest_audio_ts > packet->dts_usec;
|
|
|
|
else
|
|
|
|
return output->highest_video_ts > packet->dts_usec;
|
|
|
|
}
|
|
|
|
|
2016-11-17 05:25:23 -08:00
|
|
|
/* H.264 Annex B 4-byte start code, prefixed to the caption SEI NAL unit */
static const uint8_t nal_start[4] = {0, 0, 0, 1};
|
|
|
|
|
|
|
|
static bool add_caption(struct obs_output *output, struct encoder_packet *out)
|
|
|
|
{
|
2016-12-24 03:45:19 -08:00
|
|
|
struct encoder_packet backup = *out;
|
2016-11-17 05:25:23 -08:00
|
|
|
sei_t sei;
|
|
|
|
uint8_t *data;
|
|
|
|
size_t size;
|
2016-12-24 03:45:19 -08:00
|
|
|
long ref = 1;
|
2016-11-17 05:25:23 -08:00
|
|
|
|
|
|
|
DARRAY(uint8_t) out_data;
|
|
|
|
|
|
|
|
if (out->priority > 1)
|
|
|
|
return false;
|
|
|
|
|
2018-06-28 11:22:44 -07:00
|
|
|
sei_init(&sei, 0.0);
|
2016-11-17 05:25:23 -08:00
|
|
|
|
2016-12-24 02:13:50 -08:00
|
|
|
da_init(out_data);
|
2021-07-21 06:34:42 -07:00
|
|
|
da_push_back_array(out_data, (uint8_t *)&ref, sizeof(ref));
|
2016-12-24 03:45:19 -08:00
|
|
|
da_push_back_array(out_data, out->data, out->size);
|
2016-12-24 02:13:50 -08:00
|
|
|
|
2019-08-26 15:58:20 -07:00
|
|
|
if (output->caption_data.size > 0) {
|
|
|
|
|
|
|
|
cea708_t cea708;
|
|
|
|
cea708_init(&cea708, 0); // set up a new popon frame
|
|
|
|
void *caption_buf = bzalloc(3 * sizeof(uint8_t));
|
|
|
|
|
|
|
|
while (output->caption_data.size > 0) {
|
|
|
|
circlebuf_pop_front(&output->caption_data, caption_buf,
|
|
|
|
3 * sizeof(uint8_t));
|
|
|
|
|
|
|
|
if ((((uint8_t *)caption_buf)[0] & 0x3) != 0) {
|
|
|
|
// only send cea 608
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint16_t captionData = ((uint8_t *)caption_buf)[1];
|
|
|
|
captionData = captionData << 8;
|
|
|
|
captionData += ((uint8_t *)caption_buf)[2];
|
|
|
|
|
|
|
|
// padding
|
|
|
|
if (captionData == 0x8080) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (captionData == 0) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!eia608_parity_varify(captionData)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
cea708_add_cc_data(&cea708, 1,
|
|
|
|
((uint8_t *)caption_buf)[0] & 0x3,
|
|
|
|
captionData);
|
|
|
|
}
|
|
|
|
|
|
|
|
bfree(caption_buf);
|
2016-11-17 05:25:23 -08:00
|
|
|
|
2019-08-26 15:58:20 -07:00
|
|
|
sei_message_t *msg =
|
|
|
|
sei_message_new(sei_type_user_data_registered_itu_t_t35,
|
|
|
|
0, CEA608_MAX_SIZE);
|
|
|
|
msg->size = cea708_render(&cea708, sei_message_data(msg),
|
|
|
|
sei_message_size(msg));
|
|
|
|
sei_message_append(&sei, msg);
|
|
|
|
} else if (output->caption_head) {
|
|
|
|
caption_frame_t cf;
|
|
|
|
caption_frame_init(&cf);
|
|
|
|
caption_frame_from_text(&cf, &output->caption_head->text[0]);
|
|
|
|
|
|
|
|
sei_from_caption_frame(&sei, &cf);
|
|
|
|
|
2020-11-03 15:15:49 -08:00
|
|
|
struct caption_text *next = output->caption_head->next;
|
2019-08-26 15:58:20 -07:00
|
|
|
bfree(output->caption_head);
|
|
|
|
output->caption_head = next;
|
|
|
|
}
|
2016-11-17 05:25:23 -08:00
|
|
|
|
|
|
|
data = malloc(sei_render_size(&sei));
|
|
|
|
size = sei_render(&sei, data);
|
|
|
|
/* TODO SEI should come after AUD/SPS/PPS, but before any VCL */
|
|
|
|
da_push_back_array(out_data, nal_start, 4);
|
|
|
|
da_push_back_array(out_data, data, size);
|
|
|
|
free(data);
|
|
|
|
|
2016-12-24 02:13:50 -08:00
|
|
|
obs_encoder_packet_release(out);
|
|
|
|
|
2016-12-24 03:45:19 -08:00
|
|
|
*out = backup;
|
2019-06-22 22:13:45 -07:00
|
|
|
out->data = (uint8_t *)out_data.array + sizeof(ref);
|
2016-12-24 03:45:19 -08:00
|
|
|
out->size = out_data.num - sizeof(ref);
|
2016-12-24 02:13:50 -08:00
|
|
|
|
2016-11-17 05:25:23 -08:00
|
|
|
sei_free(&sei);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-08-26 15:58:20 -07:00
|
|
|
/* Timestamp of the last raw caption-data injection.
 * NOTE(review): this is a file-scope global shared by ALL outputs, while
 * the caption state it gates is per-output and guarded only by each
 * output's caption_mutex — presumably fine for a single streaming output,
 * but verify before relying on multiple caption-emitting outputs. */
double last_caption_timestamp = 0;
|
|
|
|
|
2014-04-04 23:21:19 -07:00
|
|
|
/* Sends the oldest packet in the interleave buffer to the output's
 * encoded_packet callback, injecting pending captions into video packets.
 *
 * The packet is only sent once a packet of the opposing type with a
 * higher timestamp exists in the buffer, which guarantees the emitted
 * timestamps are monotonic. */
static inline void send_interleaved(struct obs_output *output)
{
	struct encoder_packet out = output->interleaved_packets.array[0];

	/* do not send an interleaved packet if there's no packet of the
	 * opposing type of a higher timestamp in the interleave buffer.
	 * this ensures that the timestamps are monotonic */
	if (!has_higher_opposing_ts(output, &out))
		return;

	da_erase(output->interleaved_packets, 0);

	if (out.type == OBS_ENCODER_VIDEO) {
		output->total_frames++;

		/* caption queues are mutated by other threads; hold the
		 * caption mutex for the whole injection decision */
		pthread_mutex_lock(&output->caption_mutex);

		/* packet pts converted to seconds */
		double frame_timestamp =
			(out.pts * out.timebase_num) / (double)out.timebase_den;

		/* text captions: inject once their scheduled time arrives,
		 * then delay the next one by this caption's duration */
		if (output->caption_head &&
		    output->caption_timestamp <= frame_timestamp) {
			blog(LOG_DEBUG, "Sending caption: %f \"%s\"",
			     frame_timestamp, &output->caption_head->text[0]);

			double display_duration =
				output->caption_head->display_duration;

			if (add_caption(output, &out)) {
				output->caption_timestamp =
					frame_timestamp + display_duration;
			}
		}

		/* raw caption data: inject at most once per distinct frame
		 * timestamp (last_caption_timestamp is a file-scope global) */
		if (output->caption_data.size > 0) {
			if (last_caption_timestamp < frame_timestamp) {
				last_caption_timestamp = frame_timestamp;
				add_caption(output, &out);
			}
		}

		pthread_mutex_unlock(&output->caption_mutex);
	}

	output->info.encoded_packet(output->context.data, &out);
	obs_encoder_packet_release(&out);
}
|
|
|
|
|
2014-04-26 23:29:40 -07:00
|
|
|
static inline void set_higher_ts(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
struct encoder_packet *packet)
|
2014-04-26 23:29:40 -07:00
|
|
|
{
|
|
|
|
if (packet->type == OBS_ENCODER_VIDEO) {
|
|
|
|
if (output->highest_video_ts < packet->dts_usec)
|
|
|
|
output->highest_video_ts = packet->dts_usec;
|
|
|
|
} else {
|
|
|
|
if (output->highest_audio_ts < packet->dts_usec)
|
|
|
|
output->highest_audio_ts = packet->dts_usec;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static inline struct encoder_packet *
|
|
|
|
find_first_packet_type(struct obs_output *output, enum obs_encoder_type type,
|
|
|
|
size_t audio_idx);
|
2016-01-30 09:31:57 -08:00
|
|
|
static int find_first_packet_type_idx(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
enum obs_encoder_type type,
|
|
|
|
size_t audio_idx);
|
2016-01-25 02:41:02 -08:00
|
|
|
|
|
|
|
/* gets the point where audio and video are closest together */
|
|
|
|
static size_t get_interleaved_start_idx(struct obs_output *output)
|
|
|
|
{
|
|
|
|
int64_t closest_diff = 0x7FFFFFFFFFFFFFFFLL;
|
2019-06-22 22:13:45 -07:00
|
|
|
struct encoder_packet *first_video =
|
|
|
|
find_first_packet_type(output, OBS_ENCODER_VIDEO, 0);
|
2016-01-25 02:41:02 -08:00
|
|
|
size_t video_idx = DARRAY_INVALID;
|
|
|
|
size_t idx = 0;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < output->interleaved_packets.num; i++) {
|
|
|
|
struct encoder_packet *packet =
|
|
|
|
&output->interleaved_packets.array[i];
|
|
|
|
int64_t diff;
|
|
|
|
|
|
|
|
if (packet->type != OBS_ENCODER_AUDIO) {
|
|
|
|
if (packet == first_video)
|
|
|
|
video_idx = i;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
diff = llabs(packet->dts_usec - first_video->dts_usec);
|
|
|
|
if (diff < closest_diff) {
|
|
|
|
closest_diff = diff;
|
|
|
|
idx = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return video_idx < idx ? video_idx : idx;
|
|
|
|
}
|
|
|
|
|
2016-01-30 09:33:20 -08:00
|
|
|
static int prune_premature_packets(struct obs_output *output)
|
2014-10-22 15:16:37 -07:00
|
|
|
{
|
2016-01-30 09:33:20 -08:00
|
|
|
size_t audio_mixes = num_audio_mixes(output);
|
|
|
|
struct encoder_packet *video;
|
|
|
|
int video_idx;
|
|
|
|
int max_idx;
|
2016-01-25 02:41:02 -08:00
|
|
|
int64_t duration_usec;
|
2016-01-30 09:33:20 -08:00
|
|
|
int64_t max_diff = 0;
|
2016-06-22 14:08:56 -07:00
|
|
|
int64_t diff = 0;
|
2014-10-22 15:16:37 -07:00
|
|
|
|
2016-01-30 09:33:20 -08:00
|
|
|
video_idx = find_first_packet_type_idx(output, OBS_ENCODER_VIDEO, 0);
|
|
|
|
if (video_idx == -1) {
|
|
|
|
output->received_video = false;
|
|
|
|
return -1;
|
|
|
|
}
|
2014-10-22 15:16:37 -07:00
|
|
|
|
2016-01-30 09:33:20 -08:00
|
|
|
max_idx = video_idx;
|
|
|
|
video = &output->interleaved_packets.array[video_idx];
|
|
|
|
duration_usec = video->timebase_num * 1000000LL / video->timebase_den;
|
2014-10-22 15:16:37 -07:00
|
|
|
|
2016-01-30 09:33:20 -08:00
|
|
|
for (size_t i = 0; i < audio_mixes; i++) {
|
|
|
|
struct encoder_packet *audio;
|
|
|
|
int audio_idx;
|
2014-10-22 15:16:37 -07:00
|
|
|
|
2016-01-30 09:33:20 -08:00
|
|
|
audio_idx = find_first_packet_type_idx(output,
|
2019-06-22 22:13:45 -07:00
|
|
|
OBS_ENCODER_AUDIO, i);
|
2016-01-30 09:33:20 -08:00
|
|
|
if (audio_idx == -1) {
|
|
|
|
output->received_audio = false;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
audio = &output->interleaved_packets.array[audio_idx];
|
|
|
|
if (audio_idx > max_idx)
|
|
|
|
max_idx = audio_idx;
|
|
|
|
|
|
|
|
diff = audio->dts_usec - video->dts_usec;
|
|
|
|
if (diff > max_diff)
|
|
|
|
max_diff = diff;
|
|
|
|
}
|
2014-10-22 15:16:37 -07:00
|
|
|
|
2016-01-30 09:33:20 -08:00
|
|
|
return diff > duration_usec ? max_idx + 1 : 0;
|
2016-01-25 02:41:02 -08:00
|
|
|
}
|
2014-10-22 15:16:37 -07:00
|
|
|
|
2016-01-25 02:41:02 -08:00
|
|
|
static void discard_to_idx(struct obs_output *output, size_t idx)
|
|
|
|
{
|
|
|
|
for (size_t i = 0; i < idx; i++) {
|
|
|
|
struct encoder_packet *packet =
|
|
|
|
&output->interleaved_packets.array[i];
|
2016-12-07 12:45:25 -08:00
|
|
|
obs_encoder_packet_release(packet);
|
2016-01-25 02:41:02 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
da_erase_range(output->interleaved_packets, 0, idx);
|
2014-10-22 15:16:37 -07:00
|
|
|
}
|
|
|
|
|
2016-01-25 02:41:02 -08:00
|
|
|
/* Set to 1 to log the interleave buffer contents and pruning decisions
 * while the output's starting packets are being synchronized */
#define DEBUG_STARTING_PACKETS 0
|
|
|
|
|
2016-01-30 09:33:20 -08:00
|
|
|
/* Prunes starting packets so audio and video begin closely aligned.
 *
 * Returns false if packets of some required type are still missing
 * (caller should wait for more data), true once the buffer head is
 * usable. */
static bool prune_interleaved_packets(struct obs_output *output)
{
	size_t start_idx = 0;
	int prune_start = prune_premature_packets(output);

#if DEBUG_STARTING_PACKETS == 1
	blog(LOG_DEBUG, "--------- Pruning! %d ---------", prune_start);
	for (size_t i = 0; i < output->interleaved_packets.num; i++) {
		struct encoder_packet *packet =
			&output->interleaved_packets.array[i];
		/* FIX: use PRId64 for the int64_t dts_usec; "%lld" is
		 * mismatched (UB) on platforms where int64_t is 'long' */
		blog(LOG_DEBUG, "packet: %s %d, ts: %" PRId64 ", pruned = %s",
		     packet->type == OBS_ENCODER_AUDIO ? "audio" : "video",
		     (int)packet->track_idx, packet->dts_usec,
		     (int)i < prune_start ? "true" : "false");
	}
#endif

	/* prunes the first video packet if it's too far away from audio */
	if (prune_start == -1)
		return false;
	else if (prune_start != 0)
		start_idx = (size_t)prune_start;
	else
		start_idx = get_interleaved_start_idx(output);

	if (start_idx)
		discard_to_idx(output, start_idx);

	return true;
}
|
|
|
|
|
2016-01-30 09:31:57 -08:00
|
|
|
static int find_first_packet_type_idx(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
enum obs_encoder_type type,
|
|
|
|
size_t audio_idx)
|
2014-10-22 15:16:37 -07:00
|
|
|
{
|
|
|
|
for (size_t i = 0; i < output->interleaved_packets.num; i++) {
|
|
|
|
struct encoder_packet *packet =
|
|
|
|
&output->interleaved_packets.array[i];
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
if (packet->type == type) {
|
|
|
|
if (type == OBS_ENCODER_AUDIO &&
|
|
|
|
packet->track_idx != audio_idx) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-01-30 09:31:57 -08:00
|
|
|
return (int)i;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
}
|
2014-10-22 15:16:37 -07:00
|
|
|
}
|
|
|
|
|
2016-01-30 09:31:57 -08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-04-18 14:02:57 -07:00
|
|
|
static int find_last_packet_type_idx(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
enum obs_encoder_type type,
|
|
|
|
size_t audio_idx)
|
2016-04-18 14:02:57 -07:00
|
|
|
{
|
|
|
|
for (size_t i = output->interleaved_packets.num; i > 0; i--) {
|
|
|
|
struct encoder_packet *packet =
|
|
|
|
&output->interleaved_packets.array[i - 1];
|
|
|
|
|
|
|
|
if (packet->type == type) {
|
|
|
|
if (type == OBS_ENCODER_AUDIO &&
|
|
|
|
packet->track_idx != audio_idx) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (int)(i - 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static inline struct encoder_packet *
|
|
|
|
find_first_packet_type(struct obs_output *output, enum obs_encoder_type type,
|
|
|
|
size_t audio_idx)
|
2016-01-30 09:31:57 -08:00
|
|
|
{
|
|
|
|
int idx = find_first_packet_type_idx(output, type, audio_idx);
|
|
|
|
return (idx != -1) ? &output->interleaved_packets.array[idx] : NULL;
|
2014-10-22 15:16:37 -07:00
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static inline struct encoder_packet *
|
|
|
|
find_last_packet_type(struct obs_output *output, enum obs_encoder_type type,
|
|
|
|
size_t audio_idx)
|
2016-04-18 14:02:57 -07:00
|
|
|
{
|
|
|
|
int idx = find_last_packet_type_idx(output, type, audio_idx);
|
|
|
|
return (idx != -1) ? &output->interleaved_packets.array[idx] : NULL;
|
|
|
|
}
|
|
|
|
|
2016-04-20 08:36:54 -07:00
|
|
|
static bool get_audio_and_video_packets(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
struct encoder_packet **video,
|
|
|
|
struct encoder_packet **audio,
|
|
|
|
size_t audio_mixes)
|
2014-10-22 15:16:37 -07:00
|
|
|
{
|
2016-04-20 08:36:54 -07:00
|
|
|
*video = find_first_packet_type(output, OBS_ENCODER_VIDEO, 0);
|
|
|
|
if (!*video)
|
2014-11-03 14:13:14 -08:00
|
|
|
output->received_video = false;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
|
|
|
|
for (size_t i = 0; i < audio_mixes; i++) {
|
|
|
|
audio[i] = find_first_packet_type(output, OBS_ENCODER_AUDIO, i);
|
|
|
|
if (!audio[i]) {
|
|
|
|
output->received_audio = false;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-20 08:36:54 -07:00
|
|
|
if (!*video) {
|
2014-11-03 14:13:14 -08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-04-20 08:36:54 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool initialize_interleaved_packets(struct obs_output *output)
|
|
|
|
{
|
|
|
|
struct encoder_packet *video;
|
|
|
|
struct encoder_packet *audio[MAX_AUDIO_MIXES];
|
|
|
|
struct encoder_packet *last_audio[MAX_AUDIO_MIXES];
|
|
|
|
size_t audio_mixes = num_audio_mixes(output);
|
|
|
|
size_t start_idx;
|
|
|
|
|
|
|
|
if (!get_audio_and_video_packets(output, &video, audio, audio_mixes))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < audio_mixes; i++)
|
2019-06-22 22:13:45 -07:00
|
|
|
last_audio[i] =
|
|
|
|
find_last_packet_type(output, OBS_ENCODER_AUDIO, i);
|
2016-04-20 08:36:54 -07:00
|
|
|
|
2016-04-18 14:02:57 -07:00
|
|
|
/* ensure that there is audio past the first video packet */
|
|
|
|
for (size_t i = 0; i < audio_mixes; i++) {
|
|
|
|
if (last_audio[i]->dts_usec < video->dts_usec) {
|
|
|
|
output->received_audio = false;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-20 08:36:54 -07:00
|
|
|
/* clear out excess starting audio if it hasn't been already */
|
|
|
|
start_idx = get_interleaved_start_idx(output);
|
|
|
|
if (start_idx) {
|
|
|
|
discard_to_idx(output, start_idx);
|
|
|
|
if (!get_audio_and_video_packets(output, &video, audio,
|
2019-06-22 22:13:45 -07:00
|
|
|
audio_mixes))
|
2016-04-20 08:36:54 -07:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-10-22 15:16:37 -07:00
|
|
|
/* get new offsets */
|
2017-09-28 06:04:54 -07:00
|
|
|
output->video_offset = video->pts;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
for (size_t i = 0; i < audio_mixes; i++)
|
|
|
|
output->audio_offsets[i] = audio[i]->dts;
|
2014-10-22 15:16:37 -07:00
|
|
|
|
2016-01-25 02:41:02 -08:00
|
|
|
#if DEBUG_STARTING_PACKETS == 1
|
|
|
|
int64_t v = video->dts_usec;
|
|
|
|
int64_t a = audio[0]->dts_usec;
|
|
|
|
int64_t diff = v - a;
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
blog(LOG_DEBUG,
|
|
|
|
"output '%s' offset for video: %lld, audio: %lld, "
|
|
|
|
"diff: %lldms",
|
|
|
|
output->context.name, v, a, diff / 1000LL);
|
2016-01-25 02:41:02 -08:00
|
|
|
#endif
|
|
|
|
|
2014-10-22 15:16:37 -07:00
|
|
|
/* subtract offsets from highest TS offset variables */
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
output->highest_audio_ts -= audio[0]->dts_usec;
|
2014-10-22 15:16:37 -07:00
|
|
|
output->highest_video_ts -= video->dts_usec;
|
|
|
|
|
|
|
|
/* apply new offsets to all existing packet DTS/PTS values */
|
|
|
|
for (size_t i = 0; i < output->interleaved_packets.num; i++) {
|
|
|
|
struct encoder_packet *packet =
|
|
|
|
&output->interleaved_packets.array[i];
|
|
|
|
apply_interleaved_packet_offset(output, packet);
|
|
|
|
}
|
2014-11-03 14:13:14 -08:00
|
|
|
|
|
|
|
return true;
|
2014-10-22 15:16:37 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void insert_interleaved_packet(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
struct encoder_packet *out)
|
2014-10-22 15:16:37 -07:00
|
|
|
{
|
|
|
|
size_t idx;
|
|
|
|
for (idx = 0; idx < output->interleaved_packets.num; idx++) {
|
|
|
|
struct encoder_packet *cur_packet;
|
|
|
|
cur_packet = output->interleaved_packets.array + idx;
|
|
|
|
|
2017-09-28 06:17:17 -07:00
|
|
|
if (out->dts_usec == cur_packet->dts_usec &&
|
|
|
|
out->type == OBS_ENCODER_VIDEO) {
|
2014-10-22 15:16:37 -07:00
|
|
|
break;
|
2017-09-28 06:17:17 -07:00
|
|
|
} else if (out->dts_usec < cur_packet->dts_usec) {
|
|
|
|
break;
|
|
|
|
}
|
2014-10-22 15:16:37 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
da_insert(output->interleaved_packets, idx, out);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void resort_interleaved_packets(struct obs_output *output)
|
|
|
|
{
|
|
|
|
DARRAY(struct encoder_packet) old_array;
|
|
|
|
|
|
|
|
old_array.da = output->interleaved_packets.da;
|
|
|
|
memset(&output->interleaved_packets, 0,
|
2019-06-22 22:13:45 -07:00
|
|
|
sizeof(output->interleaved_packets));
|
2014-10-22 15:16:37 -07:00
|
|
|
|
|
|
|
for (size_t i = 0; i < old_array.num; i++)
|
|
|
|
insert_interleaved_packet(output, &old_array.array[i]);
|
|
|
|
|
|
|
|
da_free(old_array);
|
|
|
|
}
|
|
|
|
|
2016-01-25 02:41:02 -08:00
|
|
|
static void discard_unused_audio_packets(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
int64_t dts_usec)
|
2016-01-25 02:41:02 -08:00
|
|
|
{
|
|
|
|
size_t idx = 0;
|
|
|
|
|
|
|
|
for (; idx < output->interleaved_packets.num; idx++) {
|
|
|
|
struct encoder_packet *p =
|
|
|
|
&output->interleaved_packets.array[idx];
|
|
|
|
|
|
|
|
if (p->dts_usec >= dts_usec)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (idx)
|
|
|
|
discard_to_idx(output, idx);
|
|
|
|
}
|
|
|
|
|
2014-04-04 00:30:37 -07:00
|
|
|
static void interleave_packets(void *data, struct encoder_packet *packet)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
struct obs_output *output = data;
|
2014-04-10 11:59:42 -07:00
|
|
|
struct encoder_packet out;
|
2019-06-22 22:13:45 -07:00
|
|
|
bool was_started;
|
2014-04-04 23:21:19 -07:00
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
if (!active(output))
|
|
|
|
return;
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
if (packet->type == OBS_ENCODER_AUDIO)
|
|
|
|
packet->track_idx = get_track_index(output, packet);
|
|
|
|
|
2014-04-04 23:21:19 -07:00
|
|
|
pthread_mutex_lock(&output->interleaved_mutex);
|
|
|
|
|
2016-01-25 02:41:02 -08:00
|
|
|
/* if first video frame is not a keyframe, discard until received */
|
2019-06-22 22:13:45 -07:00
|
|
|
if (!output->received_video && packet->type == OBS_ENCODER_VIDEO &&
|
2016-01-25 02:41:02 -08:00
|
|
|
!packet->keyframe) {
|
|
|
|
discard_unused_audio_packets(output, packet->dts_usec);
|
|
|
|
pthread_mutex_unlock(&output->interleaved_mutex);
|
2016-06-11 11:42:29 -07:00
|
|
|
|
|
|
|
if (output->active_delay_ns)
|
2016-12-07 12:45:25 -08:00
|
|
|
obs_encoder_packet_release(packet);
|
2016-01-25 02:41:02 -08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-10-22 15:16:37 -07:00
|
|
|
was_started = output->received_audio && output->received_video;
|
2014-04-04 23:21:19 -07:00
|
|
|
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
if (output->active_delay_ns)
|
|
|
|
out = *packet;
|
|
|
|
else
|
2016-12-07 12:45:25 -08:00
|
|
|
obs_encoder_packet_create_instance(&out, packet);
|
2015-02-10 21:47:34 -08:00
|
|
|
|
|
|
|
if (was_started)
|
|
|
|
apply_interleaved_packet_offset(output, &out);
|
|
|
|
else
|
|
|
|
check_received(output, packet);
|
|
|
|
|
2014-10-22 15:16:37 -07:00
|
|
|
insert_interleaved_packet(output, &out);
|
|
|
|
set_higher_ts(output, &out);
|
2014-04-04 23:21:19 -07:00
|
|
|
|
2014-10-22 15:16:37 -07:00
|
|
|
/* when both video and audio have been received, we're ready
|
|
|
|
* to start sending out packets (one at a time) */
|
|
|
|
if (output->received_audio && output->received_video) {
|
|
|
|
if (!was_started) {
|
2016-01-30 09:33:20 -08:00
|
|
|
if (prune_interleaved_packets(output)) {
|
|
|
|
if (initialize_interleaved_packets(output)) {
|
|
|
|
resort_interleaved_packets(output);
|
|
|
|
send_interleaved(output);
|
|
|
|
}
|
2014-11-03 14:13:14 -08:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
send_interleaved(output);
|
2014-10-22 15:16:37 -07:00
|
|
|
}
|
2014-04-04 23:21:19 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&output->interleaved_mutex);
|
2014-04-04 00:30:37 -07:00
|
|
|
}
|
|
|
|
|
2014-07-06 14:16:31 -07:00
|
|
|
static void default_encoded_callback(void *param, struct encoder_packet *packet)
|
|
|
|
{
|
|
|
|
struct obs_output *output = param;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
if (data_active(output)) {
|
|
|
|
if (packet->type == OBS_ENCODER_AUDIO)
|
|
|
|
packet->track_idx = get_track_index(output, packet);
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
|
2014-12-31 01:30:54 -08:00
|
|
|
output->info.encoded_packet(output->context.data, packet);
|
2016-06-11 11:42:29 -07:00
|
|
|
|
|
|
|
if (packet->type == OBS_ENCODER_VIDEO)
|
|
|
|
output->total_frames++;
|
|
|
|
}
|
|
|
|
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
if (output->active_delay_ns)
|
2016-12-07 12:45:25 -08:00
|
|
|
obs_encoder_packet_release(packet);
|
2014-07-06 14:16:31 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void default_raw_video_callback(void *param, struct video_data *frame)
|
|
|
|
{
|
|
|
|
struct obs_output *output = param;
|
2019-07-07 12:27:13 -07:00
|
|
|
|
|
|
|
if (video_pause_check(&output->pause, frame->timestamp))
|
|
|
|
return;
|
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
if (data_active(output))
|
2014-12-31 01:30:54 -08:00
|
|
|
output->info.raw_video(output->context.data, frame);
|
2014-07-06 14:16:31 -07:00
|
|
|
output->total_frames++;
|
2019-07-06 20:07:37 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool prepare_audio(struct obs_output *output,
|
|
|
|
const struct audio_data *old, struct audio_data *new)
|
|
|
|
{
|
2019-07-07 12:27:13 -07:00
|
|
|
if (!output->video_start_ts) {
|
|
|
|
pthread_mutex_lock(&output->pause.mutex);
|
|
|
|
output->video_start_ts = output->pause.last_video_ts;
|
|
|
|
pthread_mutex_unlock(&output->pause.mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!output->video_start_ts)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* ------------------ */
|
|
|
|
|
2019-07-06 20:07:37 -07:00
|
|
|
*new = *old;
|
|
|
|
|
|
|
|
if (old->timestamp < output->video_start_ts) {
|
2020-03-21 02:55:12 -07:00
|
|
|
uint64_t duration = util_mul_div64(old->frames, 1000000000ULL,
|
|
|
|
output->sample_rate);
|
2019-07-06 20:07:37 -07:00
|
|
|
uint64_t end_ts = (old->timestamp + duration);
|
|
|
|
uint64_t cutoff;
|
|
|
|
|
|
|
|
if (end_ts <= output->video_start_ts)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
cutoff = output->video_start_ts - old->timestamp;
|
|
|
|
new->timestamp += cutoff;
|
|
|
|
|
2020-03-21 02:55:12 -07:00
|
|
|
cutoff = util_mul_div64(cutoff, output->sample_rate,
|
|
|
|
1000000000ULL);
|
2019-07-06 20:07:37 -07:00
|
|
|
|
|
|
|
for (size_t i = 0; i < output->planes; i++)
|
|
|
|
new->data[i] += output->audio_size *(uint32_t)cutoff;
|
|
|
|
new->frames -= (uint32_t)cutoff;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
2014-07-06 14:16:31 -07:00
|
|
|
}
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
static void default_raw_audio_callback(void *param, size_t mix_idx,
|
2019-07-06 20:07:37 -07:00
|
|
|
struct audio_data *in)
|
2014-12-31 01:30:54 -08:00
|
|
|
{
|
|
|
|
struct obs_output *output = param;
|
2019-07-06 20:07:37 -07:00
|
|
|
struct audio_data out;
|
|
|
|
size_t frame_size_bytes;
|
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
if (!data_active(output))
|
|
|
|
return;
|
|
|
|
|
2019-07-06 20:07:37 -07:00
|
|
|
/* -------------- */
|
|
|
|
|
|
|
|
if (!prepare_audio(output, in, &out))
|
|
|
|
return;
|
2019-07-07 12:27:13 -07:00
|
|
|
if (audio_pause_check(&output->pause, &out, output->sample_rate))
|
|
|
|
return;
|
2019-07-06 20:07:37 -07:00
|
|
|
if (!output->audio_start_ts) {
|
|
|
|
output->audio_start_ts = out.timestamp;
|
|
|
|
}
|
|
|
|
|
|
|
|
frame_size_bytes = AUDIO_OUTPUT_FRAMES * output->audio_size;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < output->planes; i++)
|
|
|
|
circlebuf_push_back(&output->audio_buffer[mix_idx][i],
|
|
|
|
out.data[i],
|
|
|
|
out.frames * output->audio_size);
|
|
|
|
|
|
|
|
/* -------------- */
|
|
|
|
|
|
|
|
while (output->audio_buffer[mix_idx][0].size > frame_size_bytes) {
|
|
|
|
for (size_t i = 0; i < output->planes; i++) {
|
|
|
|
circlebuf_pop_front(&output->audio_buffer[mix_idx][i],
|
|
|
|
output->audio_data[i],
|
|
|
|
frame_size_bytes);
|
|
|
|
out.data[i] = (uint8_t *)output->audio_data[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
out.frames = AUDIO_OUTPUT_FRAMES;
|
|
|
|
out.timestamp = output->audio_start_ts +
|
|
|
|
audio_frames_to_ns(output->sample_rate,
|
|
|
|
output->total_audio_frames);
|
|
|
|
|
2019-07-07 12:27:13 -07:00
|
|
|
pthread_mutex_lock(&output->pause.mutex);
|
|
|
|
out.timestamp += output->pause.ts_offset;
|
|
|
|
pthread_mutex_unlock(&output->pause.mutex);
|
|
|
|
|
2019-07-06 20:07:37 -07:00
|
|
|
output->total_audio_frames += AUDIO_OUTPUT_FRAMES;
|
|
|
|
|
|
|
|
if (output->info.raw_audio2)
|
|
|
|
output->info.raw_audio2(output->context.data, mix_idx,
|
|
|
|
&out);
|
|
|
|
else
|
|
|
|
output->info.raw_audio(output->context.data, &out);
|
|
|
|
}
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void start_audio_encoders(struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
encoded_callback_t encoded_callback)
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
{
|
|
|
|
size_t num_mixes = num_audio_mixes(output);
|
|
|
|
|
|
|
|
for (size_t i = 0; i < num_mixes; i++) {
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_encoder_start(output->audio_encoders[i], encoded_callback,
|
|
|
|
output);
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
}
|
2014-12-31 01:30:54 -08:00
|
|
|
}
|
|
|
|
|
2018-10-04 20:04:40 -07:00
|
|
|
/* Connect the output to raw (unencoded) audio.
 *
 * Outputs implementing raw_audio2 receive a separate connection per mixer
 * selected in mixer_mask; older outputs get a single connection on their
 * first selected mixer. */
static inline void start_raw_audio(obs_output_t *output)
{
	if (!output->info.raw_audio2) {
		/* Legacy single-track path */
		audio_output_connect(output->audio, get_first_mixer(output),
				     get_audio_conversion(output),
				     default_raw_audio_callback, output);
		return;
	}

	for (int mix = 0; mix < MAX_AUDIO_MIXES; mix++) {
		const size_t bit = (size_t)1 << mix;
		if ((output->mixer_mask & bit) == 0)
			continue;

		audio_output_connect(output->audio, mix,
				     get_audio_conversion(output),
				     default_raw_audio_callback, output);
	}
}
|
|
|
|
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
/* Reset all interleaving/packet bookkeeping on the output and drop any
 * buffered packets. Caller is responsible for holding the interleaved
 * mutex when packet data may be accessed concurrently -- TODO confirm
 * against call sites outside this view. */
static void reset_packet_data(obs_output_t *output)
{
	/* Clear "first packet seen" markers */
	output->received_video = false;
	output->received_audio = false;

	/* Clear timestamp tracking */
	output->highest_video_ts = 0;
	output->highest_audio_ts = 0;

	/* Clear per-stream offsets */
	output->video_offset = 0;
	for (size_t mix = 0; mix < MAX_AUDIO_MIXES; mix++)
		output->audio_offsets[mix] = 0;

	free_packets(output);
}
|
|
|
|
|
|
|
|
static inline bool preserve_active(struct obs_output *output)
|
|
|
|
{
|
|
|
|
return (output->delay_flags & OBS_OUTPUT_DELAY_PRESERVE) != 0;
|
|
|
|
}
|
|
|
|
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
/* Hooks the output up to its data sources when capture begins.
 *
 * 'encoded' selects encoder-driven capture vs. raw frame/sample capture;
 * 'has_video'/'has_audio' select which streams are connected.  For encoded
 * capture this also arms the stream-delay machinery when delay_sec is set. */
static void hook_data_capture(struct obs_output *output, bool encoded,
			      bool has_video, bool has_audio)
{
	encoded_callback_t encoded_callback;

	if (encoded) {
		/* Reset interleaving state under the mutex so stale packets
		 * from a previous session cannot mix into the new one */
		pthread_mutex_lock(&output->interleaved_mutex);
		reset_packet_data(output);
		pthread_mutex_unlock(&output->interleaved_mutex);

		/* Interleaving is only needed when both A/V streams exist;
		 * otherwise packets pass straight through */
		encoded_callback = (has_video && has_audio)
					   ? interleave_packets
					   : default_encoded_callback;

		if (output->delay_sec) {
			output->active_delay_ns =
				(uint64_t)output->delay_sec * 1000000000ULL;
			output->delay_cur_flags = output->delay_flags;
			/* Chain the delay processor in front of the chosen
			 * callback: process_delay buffers packets and later
			 * forwards them to delay_callback.  Order matters --
			 * delay_callback must capture the original callback
			 * before encoded_callback is repointed. */
			output->delay_callback = encoded_callback;
			encoded_callback = process_delay;
			os_atomic_set_bool(&output->delay_active, true);

			blog(LOG_INFO,
			     "Output '%s': %" PRIu32 " second delay "
			     "active, preserve on disconnect is %s",
			     output->context.name, output->delay_sec,
			     preserve_active(output) ? "on" : "off");
		}

		if (has_audio)
			start_audio_encoders(output, encoded_callback);
		if (has_video)
			obs_encoder_start(output->video_encoder,
					  encoded_callback, output);
	} else {
		/* Raw (unencoded) capture: connect directly to the video and
		 * audio pipelines */
		if (has_video)
			start_raw_video(output->video,
					get_video_conversion(output),
					default_raw_video_callback, output);
		if (has_audio)
			start_raw_audio(output);
	}
}
|
|
|
|
|
2014-07-02 16:38:29 -07:00
|
|
|
/* Emit the "start" signal on the output's signal handler. */
static inline void signal_start(struct obs_output *output)
{
	do_output_signal(output, "start");
}
|
|
|
|
|
|
|
|
static inline void signal_reconnect(struct obs_output *output)
|
|
|
|
{
|
2016-01-18 20:01:58 -08:00
|
|
|
struct calldata params;
|
|
|
|
uint8_t stack[128];
|
|
|
|
|
|
|
|
calldata_init_fixed(¶ms, stack, sizeof(stack));
|
2015-05-10 16:05:26 -07:00
|
|
|
calldata_set_int(¶ms, "timeout_sec",
|
2022-06-25 16:19:48 -07:00
|
|
|
output->reconnect_retry_cur_msec / 1000);
|
2015-05-10 16:05:26 -07:00
|
|
|
calldata_set_ptr(¶ms, "output", output);
|
|
|
|
signal_handler_signal(output->context.signals, "reconnect", ¶ms);
|
2014-07-02 16:38:29 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Emit the "reconnect_success" signal after a reconnect attempt works. */
static inline void signal_reconnect_success(struct obs_output *output)
{
	do_output_signal(output, "reconnect_success");
}
|
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
static inline void signal_stop(struct obs_output *output)
|
2014-04-01 11:55:18 -07:00
|
|
|
{
|
2016-01-18 20:01:58 -08:00
|
|
|
struct calldata params;
|
|
|
|
|
2017-05-15 03:04:11 -07:00
|
|
|
calldata_init(¶ms);
|
2021-10-23 05:31:16 -07:00
|
|
|
calldata_set_string(¶ms, "last_error",
|
|
|
|
obs_output_get_last_error(output));
|
2016-06-11 11:42:29 -07:00
|
|
|
calldata_set_int(¶ms, "code", output->stop_code);
|
2014-08-05 17:49:28 -07:00
|
|
|
calldata_set_ptr(¶ms, "output", output);
|
2017-05-15 03:04:11 -07:00
|
|
|
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
signal_handler_signal(output->context.signals, "stop", ¶ms);
|
2017-05-15 03:04:11 -07:00
|
|
|
|
|
|
|
calldata_free(¶ms);
|
2014-04-01 11:55:18 -07:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
static inline void convert_flags(const struct obs_output *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
uint32_t flags, bool *encoded, bool *has_video,
|
|
|
|
bool *has_audio, bool *has_service)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
|
|
|
*encoded = (output->info.flags & OBS_OUTPUT_ENCODED) != 0;
|
|
|
|
if (!flags)
|
|
|
|
flags = output->info.flags;
|
|
|
|
else
|
|
|
|
flags &= output->info.flags;
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
*has_video = (flags & OBS_OUTPUT_VIDEO) != 0;
|
|
|
|
*has_audio = (flags & OBS_OUTPUT_AUDIO) != 0;
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
*has_service = (flags & OBS_OUTPUT_SERVICE) != 0;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
bool obs_output_can_begin_data_capture(const obs_output_t *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
uint32_t flags)
|
2014-04-01 11:55:18 -07:00
|
|
|
{
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
bool encoded, has_video, has_audio, has_service;
|
2014-04-01 11:55:18 -07:00
|
|
|
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_can_begin_data_capture"))
|
|
|
|
return false;
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
if (delay_active(output))
|
|
|
|
return true;
|
|
|
|
if (active(output))
|
|
|
|
return false;
|
2014-04-01 11:55:18 -07:00
|
|
|
|
2016-06-21 17:25:26 -07:00
|
|
|
if (data_capture_ending(output))
|
|
|
|
pthread_join(output->end_data_capture_thread, NULL);
|
|
|
|
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
convert_flags(output, flags, &encoded, &has_video, &has_audio,
|
2019-06-22 22:13:45 -07:00
|
|
|
&has_service);
|
2014-04-01 11:55:18 -07:00
|
|
|
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
return can_begin_data_capture(output, encoded, has_video, has_audio,
|
2019-06-22 22:13:45 -07:00
|
|
|
has_service);
|
2014-04-01 11:55:18 -07:00
|
|
|
}
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
/* Initialize the first 'num_mixes' audio encoders attached to the
 * output. On the first failure, propagate that encoder's last-error
 * string to the output and stop; returns true only if every encoder
 * initialized. (NULL-slot handling, if any, is delegated to
 * obs_encoder_initialize — assumed safe; TODO confirm against
 * obs-encoder.c.) */
static inline bool initialize_audio_encoders(obs_output_t *output,
					     size_t num_mixes)
{
	size_t idx = 0;

	while (idx < num_mixes) {
		struct obs_encoder *enc = output->audio_encoders[idx];

		if (!obs_encoder_initialize(enc)) {
			obs_output_set_last_error(
				output, obs_encoder_get_last_error(enc));
			return false;
		}

		idx++;
	}

	return true;
}
|
|
|
|
|
2016-01-30 12:47:53 -08:00
|
|
|
/* Return the first attached audio encoder that is neither active nor
 * already paired with a video encoder, or NULL if none qualifies. */
static inline obs_encoder_t *find_inactive_audio_encoder(obs_output_t *output,
							 size_t num_mixes)
{
	size_t idx;

	for (idx = 0; idx < num_mixes; idx++) {
		struct obs_encoder *candidate = output->audio_encoders[idx];
		bool usable = candidate && !candidate->active &&
			      !candidate->paired_encoder;

		if (usable)
			return candidate;
	}

	return NULL;
}
|
|
|
|
|
|
|
|
/* Pair the output's video encoder with an available audio encoder so
 * the audio encoder can wait for the first video frame (A/V sync).
 *
 * Lock order is audio->init_mutex before video->init_mutex, matching
 * the original implementation — do not reorder. The pairing conditions
 * are re-checked under the locks because encoder state may have changed
 * since find_inactive_audio_encoder() ran.
 */
static inline void pair_encoders(obs_output_t *output, size_t num_mixes)
{
	struct obs_encoder *vid = output->video_encoder;
	struct obs_encoder *aud =
		find_inactive_audio_encoder(output, num_mixes);

	if (!vid || !aud)
		return;

	pthread_mutex_lock(&aud->init_mutex);
	pthread_mutex_lock(&vid->init_mutex);

	if (!aud->active && !vid->active && !vid->paired_encoder &&
	    !aud->paired_encoder) {
		aud->wait_for_video = true;
		aud->paired_encoder = vid;
		vid->paired_encoder = aud;
	}

	pthread_mutex_unlock(&vid->init_mutex);
	pthread_mutex_unlock(&aud->init_mutex);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Initializes the encoders attached to an output before data capture
 * begins.  Returns true on success.  For an output that is already
 * active, defers to the delay-activation path instead.
 *
 * 'flags' selects which capabilities to initialize (video/audio/service);
 * see convert_flags() for how output flags are resolved.  Only encoded
 * outputs are handled here; raw outputs return false. */
bool obs_output_initialize_encoders(obs_output_t *output, uint32_t flags)
{
	bool encoded, has_video, has_audio, has_service;
	size_t num_mixes;

	if (!obs_output_valid(output, "obs_output_initialize_encoders"))
		return false;
	if (active(output))
		return delay_active(output);

	/* Computed only after validation: num_audio_mixes() dereferences
	 * 'output', so calling it on an invalid pointer would crash. */
	num_mixes = num_audio_mixes(output);

	convert_flags(output, flags, &encoded, &has_video, &has_audio,
		      &has_service);

	/* Raw (non-encoded) outputs have nothing to initialize here. */
	if (!encoded)
		return false;
	if (has_video && !obs_encoder_initialize(output->video_encoder)) {
		/* Propagate the encoder's failure reason to the output so
		 * the UI can display it. */
		obs_output_set_last_error(
			output,
			obs_encoder_get_last_error(output->video_encoder));
		return false;
	}
	if (has_audio && !initialize_audio_encoders(output, num_mixes))
		return false;

	return true;
}
|
|
|
|
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say, this is intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
/* Transitions a delayed output into the capturing state.  Returns false
 * if delay capture was already in progress, true otherwise. */
static bool begin_delayed_capture(obs_output_t *output)
{
	/* Already buffering for delay; nothing more to do. */
	if (delay_capturing(output))
		return false;

	/* Drop any stale interleaved packets and raise the capture flag
	 * under the interleave mutex so encoder callbacks observe a
	 * consistent transition. */
	pthread_mutex_lock(&output->interleaved_mutex);
	reset_packet_data(output);
	os_atomic_set_bool(&output->delay_capturing, true);
	pthread_mutex_unlock(&output->interleaved_mutex);

	/* A delayed start may be the tail end of a reconnect attempt, in
	 * which case the reconnect signal is emitted instead of start. */
	if (reconnecting(output)) {
		signal_reconnect_success(output);
		os_atomic_set_bool(&output->reconnecting, false);
	} else {
		signal_start(output);
	}

	return true;
}
|
|
|
|
|
2019-07-06 20:07:37 -07:00
|
|
|
/* Resets a raw (non-encoded) output's audio state before (re)starting
 * capture: clears buffered audio, recomputes the effective audio format
 * from the audio line plus any user-set conversion, and zeroes the
 * audio/video start timestamps and pause state. */
static void reset_raw_output(obs_output_t *output)
{
	clear_audio_buffers(output);

	if (output->audio) {
		const struct audio_output_info *aoi =
			audio_output_get_info(output->audio);
		struct audio_convert_info conv = output->audio_conversion;

		/* Start from the audio line's native format... */
		struct audio_convert_info info = {
			.samples_per_sec = aoi->samples_per_sec,
			.format = aoi->format,
			.speakers = aoi->speakers,
		};

		/* ...then override each field the caller explicitly set
		 * via the conversion info (unset fields stay native). */
		if (output->audio_conversion_set) {
			if (conv.samples_per_sec)
				info.samples_per_sec = conv.samples_per_sec;
			if (conv.format != AUDIO_FORMAT_UNKNOWN)
				info.format = conv.format;
			if (conv.speakers != SPEAKERS_UNKNOWN)
				info.speakers = conv.speakers;
		}

		output->sample_rate = info.samples_per_sec;
		output->planes = get_audio_planes(info.format, info.speakers);
		output->total_audio_frames = 0;
		/* Size of a single audio frame for this format/layout. */
		output->audio_size =
			get_audio_size(info.format, info.speakers, 1);
	}

	output->audio_start_ts = 0;
	output->video_start_ts = 0;

	pause_reset(&output->pause);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
bool obs_output_begin_data_capture(obs_output_t *output, uint32_t flags)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
bool encoded, has_video, has_audio, has_service;
|
libobs: Pair encoders only when output actually starts
Normally, paired encoders are unpaired when they stop. However, if the
pairing occurs before the encoders actually start, and the encoders
never actually end up starting, they are never unpaired, and that
pairing stays with them until the next time an output is started up
again. That in turn can cause an output that uses one of the encoders
but not the other to not function correctly, and neither properly
"start" nor stop because the data is queued continually in the
interleaved packet array.
For example, let's say there are two outputs, two video encoders, and
one audio encoder. This can be reproduced by using advanced output mode
and making the two outputs use separate video encoders while sharing
track 1's audio encoder. If you start up the stream output first and it
fails to fully connect for whatever reason (bad server, bad stream key,
etc), then you start up the recording output, the recording output will
appear to be running, but will not stop when you hit "stop recording".
It will stay perpetually on "stopping recording" and will get stuck that
way. This is because when the streaming output started, the streaming
output would initially pair video encoder A with audio encoder A before
the encoders actually fully started up (as the encoders do not fully
start up until a connection is successfully made), and when the
recording output starts up after that disconnection, audio encoder A
will wait for video encoder A rather than video encoder B because that
pairing was never actually cleared.
So, instead of pairing encoders when the output starts, wait until the
encoders themselves are being started and then pair the encoders at that
point in time. This ensures that the encoders start up and will clear
their pairing when no longer in use.
2019-05-22 00:37:12 -07:00
|
|
|
size_t num_mixes;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_output_valid(output, "obs_output_begin_data_capture"))
|
|
|
|
return false;
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
if (delay_active(output))
|
|
|
|
return begin_delayed_capture(output);
|
|
|
|
if (active(output))
|
|
|
|
return false;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
output->total_frames = 0;
|
2014-07-06 14:16:31 -07:00
|
|
|
|
2019-07-06 20:07:37 -07:00
|
|
|
if ((output->info.flags & OBS_OUTPUT_ENCODED) == 0) {
|
|
|
|
reset_raw_output(output);
|
|
|
|
}
|
|
|
|
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
convert_flags(output, flags, &encoded, &has_video, &has_audio,
|
2019-06-22 22:13:45 -07:00
|
|
|
&has_service);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
if (!can_begin_data_capture(output, encoded, has_video, has_audio,
|
2019-06-22 22:13:45 -07:00
|
|
|
has_service))
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
return false;
|
|
|
|
|
libobs: Pair encoders only when output actually starts
Normally, paired encoders are unpaired when they stop. However, if the
pairing occurs before the encoders actually start, and the encoders
never actually end up starting, they are never unpaired, and that
pairing stays with them until the next time an output is started up
again. That in turn can cause an output that uses one of the encoders
but not the other to not function correctly, and neither properly
"start" nor stop because the data is queued continually in the
interleaved packet array.
For example, let's say there are two outputs, two video encoders, and
one audio encoder. This can be reproduced by using advanced output mode
and making the two outputs use separate video encoders while sharing
track 1's audio encoder. If you start up the stream output first and it
fails to fully connect for whatever reason (bad server, bad stream key,
etc), then you start up the recording output, the recording output will
appear to be running, but will not stop when you hit "stop recording".
It will stay perpetually on "stopping recording" and will get stuck that
way. This is because when the streaming output started, the streaming
output would initially pair video encoder A with audio encoder A before
the encoders actually fully started up (as the encoders do not fully
start up until a connection is successfully made), and when the
recording output starts up after that disconnection, audio encoder A
will wait for video encoder A rather than video encoder B because that
pairing was never actually cleared.
So, instead of pairing encoders when the output starts, wait until the
encoders themselves are being started and then pair the encoders at that
point in time. This ensures that the encoders start up and will clear
their pairing when no longer in use.
2019-05-22 00:37:12 -07:00
|
|
|
num_mixes = num_audio_mixes(output);
|
|
|
|
if (has_video && has_audio)
|
|
|
|
pair_encoders(output, num_mixes);
|
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
os_atomic_set_bool(&output->data_active, true);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
hook_data_capture(output, encoded, has_video, has_audio);
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
|
|
|
|
if (has_service)
|
|
|
|
obs_service_activate(output->service);
|
|
|
|
|
2015-09-10 11:02:20 -07:00
|
|
|
do_output_signal(output, "activate");
|
2016-06-20 16:07:29 -07:00
|
|
|
os_atomic_set_bool(&output->active, true);
|
2014-07-02 16:38:29 -07:00
|
|
|
|
2016-06-20 16:07:29 -07:00
|
|
|
if (reconnecting(output)) {
|
2014-07-02 16:38:29 -07:00
|
|
|
signal_reconnect_success(output);
|
2016-06-20 16:07:29 -07:00
|
|
|
os_atomic_set_bool(&output->reconnecting, false);
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
|
2016-06-20 16:07:29 -07:00
|
|
|
} else if (delay_active(output)) {
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
do_output_signal(output, "starting");
|
|
|
|
|
2014-07-02 16:38:29 -07:00
|
|
|
} else {
|
|
|
|
signal_start(output);
|
|
|
|
}
|
|
|
|
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
static inline void stop_audio_encoders(obs_output_t *output,
|
2019-06-22 22:13:45 -07:00
|
|
|
encoded_callback_t encoded_callback)
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
{
|
|
|
|
size_t num_mixes = num_audio_mixes(output);
|
|
|
|
|
|
|
|
for (size_t i = 0; i < num_mixes; i++) {
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_encoder_stop(output->audio_encoders[i], encoded_callback,
|
|
|
|
output);
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-04 20:04:40 -07:00
|
|
|
static inline void stop_raw_audio(obs_output_t *output)
|
|
|
|
{
|
|
|
|
if (output->info.raw_audio2) {
|
|
|
|
for (int idx = 0; idx < MAX_AUDIO_MIXES; idx++) {
|
|
|
|
if ((output->mixer_mask & ((size_t)1 << idx)) != 0) {
|
2019-06-22 22:13:45 -07:00
|
|
|
audio_output_disconnect(
|
|
|
|
output->audio, idx,
|
|
|
|
default_raw_audio_callback, output);
|
2018-10-04 20:04:40 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2019-06-22 22:13:45 -07:00
|
|
|
audio_output_disconnect(output->audio, get_first_mixer(output),
|
|
|
|
default_raw_audio_callback, output);
|
2018-10-04 20:04:40 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-21 17:25:26 -07:00
|
|
|
static void *end_data_capture_thread(void *data)
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
{
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
bool encoded, has_video, has_audio, has_service;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
encoded_callback_t encoded_callback;
|
2016-06-21 17:25:26 -07:00
|
|
|
obs_output_t *output = data;
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
convert_flags(output, 0, &encoded, &has_video, &has_audio,
|
2019-06-22 22:13:45 -07:00
|
|
|
&has_service);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
|
|
|
|
if (encoded) {
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
if (output->active_delay_ns)
|
|
|
|
encoded_callback = process_delay;
|
|
|
|
else
|
2019-06-22 22:13:45 -07:00
|
|
|
encoded_callback = (has_video && has_audio)
|
|
|
|
? interleave_packets
|
|
|
|
: default_encoded_callback;
|
2014-04-04 00:30:37 -07:00
|
|
|
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
if (has_video)
|
|
|
|
obs_encoder_stop(output->video_encoder,
|
2019-06-22 22:13:45 -07:00
|
|
|
encoded_callback, output);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
if (has_audio)
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
stop_audio_encoders(output, encoded_callback);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
} else {
|
|
|
|
if (has_video)
|
2018-01-31 18:54:36 -08:00
|
|
|
stop_raw_video(output->video,
|
2019-06-22 22:13:45 -07:00
|
|
|
default_raw_video_callback, output);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
if (has_audio)
|
2018-10-04 20:04:40 -07:00
|
|
|
stop_raw_audio(output);
|
Implement encoder usage with outputs
- Make it so that encoders can be assigned to outputs. If an encoder
is destroyed, it will automatically remove itself from that output.
I specifically didn't want to do reference counting because it leaves
too much potential for unchecked references and it just felt like it
would be more trouble than it's worth.
- Add a 'flags' value to the output definition structure. This lets
the output specify if it uses video/audio, and whether the output is
meant to be used with OBS encoders or not.
- Remove boilerplate code for outputs. This makes it easier to program
outputs. The boilerplate code involved before was mostly just
involving connecting to the audio/video data streams directly in each
output plugin.
Instead of doing that, simply add plugin callback functions for
receiving video/audio (either encoded or non-encoded, whichever it's
set to use), and then call obs_output_begin_data_capture and
obs_output_end_data_capture to automatically handle setting up
connections to raw or encoded video/audio streams for the plugin.
- Remove 'active' function from output callbacks, as it's no longer
really needed now that the libobs output context automatically knows
when the output is active or not.
- Make it so that an encoder cannot be destroyed until all data
connections to the encoder have been removed.
- Change the 'start' and 'stop' functions in the encoder interface to
just an 'initialize' callback, which initializes the encoder.
- Make it so that the encoder must be initialized first before the data
stream can be started. The reason why initialization was separated
from starting the encoder stream was because we need to be able to
check that the settings used with the encoder *can* be used first.
This problem was especially annoying if you had both video/audio
encoding. Before, you'd have to check the return value from
obs_encoder_start, and if that second encoder fails, then you
basically had to stop the first encoder again, making for
unnecessary boilerplate code whenever starting up two encoders.
2014-03-27 21:50:15 -07:00
|
|
|
}
|
|
|
|
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
if (has_service)
|
|
|
|
obs_service_deactivate(output->service, false);
|
|
|
|
|
libobs: Add encoded output delay support
This feature allows a user to delay an output (as long as the output
itself supports it). Needless to say this intended for live streams,
where users may want to delay their streams to prevent stream sniping,
cheating, and other such things.
The design this time was a bit more elaborate, but still simple in
design: the user can now schedule stops/starts without having to wait
for the stream itself to stop before being able to take any action.
Optionally, they can also forcibly stop stream (and delay) in case
something happens which they might not want to be streamed.
Additionally, a new option was added to preserve stream cutoff point on
disconnections/reconnections, so that if you get disconnected while
streaming, when it reconnects, it will reconnect right at the point
where it left off. This will probably be quite useful for a number of
applications in addition to regular delay, such as setting the delay to
1 second and then using this feature to minimize, for example, a
critical stream such as a tournament stream from getting any of its
stream data cut off. However, using this feature will of course cause
the stream data to buffer and increase delay (and memory usage) while
it's in the process of reconnecting.
2015-09-06 15:39:46 -07:00
|
|
|
if (output->active_delay_ns)
|
|
|
|
obs_output_cleanup_delay(output);
|
|
|
|
|
2015-09-10 11:02:20 -07:00
|
|
|
do_output_signal(output, "deactivate");
|
2016-06-20 16:07:29 -07:00
|
|
|
os_atomic_set_bool(&output->active, false);
|
2016-06-20 17:09:21 -07:00
|
|
|
os_event_signal(output->stopping_event);
|
2016-06-21 17:25:26 -07:00
|
|
|
os_atomic_set_bool(&output->end_data_capture_thread_active, false);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
/* Ends an active data capture for an output.
 *
 * Fast path: if the output is not active (or has no active data capture),
 * optionally emits the "stop" signal so callers blocked on stopping_event
 * are released, then returns.  Otherwise the heavy teardown work is pushed
 * to end_data_capture_thread so this function returns quickly.
 *
 * When 'signal' is true, the "stop" signal is emitted and stop_code is
 * reset to OBS_OUTPUT_SUCCESS. */
static void obs_output_end_data_capture_internal(obs_output_t *output,
						 bool signal)
{
	int ret;

	if (!obs_output_valid(output, "obs_output_end_data_capture"))
		return;

	/* Not capturing: still signal stop/stopping so any waiter is
	 * released, then bail out. */
	if (!active(output) || !data_active(output)) {
		if (signal) {
			signal_stop(output);
			output->stop_code = OBS_OUTPUT_SUCCESS;
			os_event_signal(output->stopping_event);
		}
		return;
	}

	/* Delayed output: stop feeding the delay buffer.  If a delayed
	 * restart is pending (delay_restart_refs), keep delay_active set and
	 * only signal stopping — the restart path will resume capture. */
	if (delay_active(output)) {
		os_atomic_set_bool(&output->delay_capturing, false);

		if (!os_atomic_load_long(&output->delay_restart_refs)) {
			os_atomic_set_bool(&output->delay_active, false);
		} else {
			os_event_signal(output->stopping_event);
			return;
		}
	}

	os_atomic_set_bool(&output->data_active, false);

	if (output->video)
		log_frame_info(output);

	/* Join a previous teardown thread before starting a new one so the
	 * pthread_t handle is never overwritten while still live. */
	if (data_capture_ending(output))
		pthread_join(output->end_data_capture_thread, NULL);

	os_atomic_set_bool(&output->end_data_capture_thread_active, true);
	ret = pthread_create(&output->end_data_capture_thread, NULL,
			     end_data_capture_thread, output);
	if (ret != 0) {
		blog(LOG_WARNING,
		     "Failed to create end_data_capture_thread "
		     "for output '%s'!",
		     output->context.name);
		/* Fall back to synchronous teardown on this thread. */
		end_data_capture_thread(output);
	}

	if (signal) {
		signal_stop(output);
		output->stop_code = OBS_OUTPUT_SUCCESS;
	}
}
|
|
|
|
|
|
|
|
/* Public API: ends data capture for the output and emits the "stop"
 * signal (see obs_output_end_data_capture_internal). */
void obs_output_end_data_capture(obs_output_t *output)
{
	obs_output_end_data_capture_internal(output, true);
}
|
|
|
|
|
2014-07-02 16:38:29 -07:00
|
|
|
/* Thread body for a pending reconnect attempt.
 *
 * Waits reconnect_retry_cur_msec for the stop event; a timeout means the
 * reconnect was not cancelled, so the output is restarted.  Afterwards,
 * if the stop event is still unsignalled (EAGAIN) the thread detaches
 * itself; otherwise the reconnect was cancelled and the reconnecting flag
 * is cleared (the canceller joins the thread). */
static void *reconnect_thread(void *param)
{
	struct obs_output *output = param;

	output->reconnect_thread_active = true;

	if (os_event_timedwait(output->reconnect_stop_event,
			       output->reconnect_retry_cur_msec) == ETIMEDOUT)
		obs_output_actual_start(output);

	if (os_event_try(output->reconnect_stop_event) == EAGAIN)
		pthread_detach(output->reconnect_thread);
	else
		os_atomic_set_bool(&output->reconnecting, false);

	output->reconnect_thread_active = false;
	return NULL;
}
|
|
|
|
|
|
|
|
static void output_reconnect(struct obs_output *output)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2016-06-20 16:07:29 -07:00
|
|
|
if (!reconnecting(output)) {
|
2022-06-25 16:19:48 -07:00
|
|
|
output->reconnect_retry_cur_msec =
|
|
|
|
output->reconnect_retry_sec * 1000;
|
2014-07-02 16:38:29 -07:00
|
|
|
output->reconnect_retries = 0;
|
2015-05-10 16:07:22 -07:00
|
|
|
}
|
2014-07-02 16:38:29 -07:00
|
|
|
|
|
|
|
if (output->reconnect_retries >= output->reconnect_retry_max) {
|
2016-06-11 11:42:29 -07:00
|
|
|
output->stop_code = OBS_OUTPUT_DISCONNECTED;
|
2016-06-20 16:07:29 -07:00
|
|
|
os_atomic_set_bool(&output->reconnecting, false);
|
2016-06-11 11:42:29 -07:00
|
|
|
if (delay_active(output))
|
2016-06-20 16:07:29 -07:00
|
|
|
os_atomic_set_bool(&output->delay_active, false);
|
2016-06-11 11:42:29 -07:00
|
|
|
obs_output_end_data_capture(output);
|
2014-07-02 16:38:29 -07:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-06-20 16:07:29 -07:00
|
|
|
if (!reconnecting(output)) {
|
|
|
|
os_atomic_set_bool(&output->reconnecting, true);
|
2014-07-02 16:38:29 -07:00
|
|
|
os_event_reset(output->reconnect_stop_event);
|
|
|
|
}
|
|
|
|
|
2015-05-10 16:07:22 -07:00
|
|
|
if (output->reconnect_retries) {
|
2022-06-26 00:28:00 -07:00
|
|
|
output->reconnect_retry_cur_msec =
|
|
|
|
(uint32_t)(output->reconnect_retry_cur_msec *
|
|
|
|
output->reconnect_retry_exp);
|
|
|
|
if (output->reconnect_retry_cur_msec >
|
|
|
|
RECONNECT_RETRY_MAX_MSEC) {
|
2022-06-25 16:19:48 -07:00
|
|
|
output->reconnect_retry_cur_msec =
|
|
|
|
RECONNECT_RETRY_MAX_MSEC;
|
2022-06-26 00:28:00 -07:00
|
|
|
}
|
2015-05-10 16:07:22 -07:00
|
|
|
}
|
|
|
|
|
2014-07-02 16:38:29 -07:00
|
|
|
output->reconnect_retries++;
|
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
output->stop_code = OBS_OUTPUT_DISCONNECTED;
|
2019-06-22 22:13:45 -07:00
|
|
|
ret = pthread_create(&output->reconnect_thread, NULL, &reconnect_thread,
|
|
|
|
output);
|
2014-07-02 16:38:29 -07:00
|
|
|
if (ret < 0) {
|
|
|
|
blog(LOG_WARNING, "Failed to create reconnect thread");
|
2016-06-20 16:07:29 -07:00
|
|
|
os_atomic_set_bool(&output->reconnecting, false);
|
2014-07-02 16:38:29 -07:00
|
|
|
} else {
|
2022-06-25 16:19:48 -07:00
|
|
|
blog(LOG_INFO, "Output '%s': Reconnecting in %.02f seconds..",
|
|
|
|
output->context.name,
|
|
|
|
(float)(output->reconnect_retry_cur_msec / 1000.0));
|
2014-07-02 16:38:29 -07:00
|
|
|
|
|
|
|
signal_reconnect(output);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-11 11:42:29 -07:00
|
|
|
static inline bool can_reconnect(const obs_output_t *output, int code)
|
|
|
|
{
|
|
|
|
bool reconnect_active = output->reconnect_retry_max != 0;
|
|
|
|
|
|
|
|
return (reconnecting(output) && code != OBS_OUTPUT_SUCCESS) ||
|
2019-06-22 22:13:45 -07:00
|
|
|
(reconnect_active && code == OBS_OUTPUT_DISCONNECTED);
|
2016-06-11 11:42:29 -07:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Called by output plugins to report that the output has stopped with the
 * given code.  If the code qualifies for reconnection, capture is ended
 * quietly (no "stop" signal) and a reconnect is scheduled; otherwise the
 * output fully ends and signals "stop". */
void obs_output_signal_stop(obs_output_t *output, int code)
{
	if (!obs_output_valid(output, "obs_output_signal_stop"))
		return;

	output->stop_code = code;

	if (can_reconnect(output, code)) {
		/* Bump delay_restart_refs BEFORE ending capture so the
		 * delayed output stays armed for restart (see
		 * obs_output_end_data_capture_internal). */
		if (delay_active(output))
			os_atomic_inc_long(&output->delay_restart_refs);
		obs_output_end_data_capture_internal(output, false);
		output_reconnect(output);
	} else {
		if (delay_active(output))
			os_atomic_set_bool(&output->delay_active, false);
		obs_output_end_data_capture(output);
	}
}
|
2015-05-03 16:55:43 -07:00
|
|
|
|
|
|
|
/* Adds a strong reference to the output.  NULL-safe no-op. */
void obs_output_addref(obs_output_t *output)
{
	if (output)
		obs_ref_addref(&output->context.control->ref);
}
|
|
|
|
|
|
|
|
/* Releases a strong reference; destroys the output when the last strong
 * reference is dropped.  NULL-safe no-op. */
void obs_output_release(obs_output_t *output)
{
	if (!output)
		return;

	obs_weak_output_t *control = get_weak(output);
	if (obs_ref_release(&control->ref)) {
		// The order of operations is important here since
		// get_context_by_name in obs.c relies on weak refs
		// being alive while the context is listed
		obs_output_destroy(output);
		obs_weak_output_release(control);
	}
}
|
|
|
|
|
|
|
|
/* Adds a weak reference.  NULL-safe no-op. */
void obs_weak_output_addref(obs_weak_output_t *weak)
{
	if (weak)
		obs_weak_ref_addref(&weak->ref);
}
|
|
|
|
|
|
|
|
/* Releases a weak reference; frees the control block once the final
 * weak reference is gone.  NULL-safe no-op. */
void obs_weak_output_release(obs_weak_output_t *weak)
{
	if (weak && obs_weak_ref_release(&weak->ref))
		bfree(weak);
}
|
|
|
|
|
|
|
|
/* Returns a new strong reference to the output, or NULL if the output is
 * NULL or already on its way to destruction. */
obs_output_t *obs_output_get_ref(obs_output_t *output)
{
	return output ? obs_weak_output_get_output(get_weak(output)) : NULL;
}
|
|
|
|
|
|
|
|
/* Returns a new weak reference to the output (caller must release it).
 * Returns NULL for a NULL output. */
obs_weak_output_t *obs_output_get_weak_output(obs_output_t *output)
{
	if (!output)
		return NULL;

	obs_weak_output_t *control = get_weak(output);
	obs_weak_output_addref(control);
	return control;
}
|
|
|
|
|
|
|
|
/* Attempts to promote a weak reference to a strong one; returns NULL if
 * the underlying output has already been destroyed (or weak is NULL). */
obs_output_t *obs_weak_output_get_output(obs_weak_output_t *weak)
{
	if (weak && obs_weak_ref_get_ref(&weak->ref))
		return weak->output;

	return NULL;
}
|
|
|
|
|
|
|
|
bool obs_weak_output_references_output(obs_weak_output_t *weak,
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_output_t *output)
|
2015-05-03 16:55:43 -07:00
|
|
|
{
|
|
|
|
return weak && output && weak->output == output;
|
|
|
|
}
|
2015-09-15 22:51:37 -07:00
|
|
|
|
|
|
|
/* Returns the type_data pointer registered in the output's info struct,
 * or NULL for an invalid output. */
void *obs_output_get_type_data(obs_output_t *output)
{
	if (!obs_output_valid(output, "obs_output_get_type_data"))
		return NULL;

	return output->info.type_data;
}
|
2015-10-19 15:01:25 -07:00
|
|
|
|
|
|
|
const char *obs_output_get_id(const obs_output_t *output)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, "obs_output_get_id") ? output->info.id
|
|
|
|
: NULL;
|
2015-10-19 15:01:25 -07:00
|
|
|
}
|
2016-11-17 05:25:23 -08:00
|
|
|
|
2019-08-26 15:58:20 -07:00
|
|
|
/* Queues raw CEA-708 caption packets for the output.
 *
 * Each caption packet is a 3-byte cc_data triplet; the packets are
 * appended to caption_data under the caption mutex for the muxing path
 * to consume.
 *
 * Fix: unlike every other public function in this file, this previously
 * dereferenced 'output' (and 'captions') with no validity check; add the
 * standard guards so a NULL argument logs instead of crashing. */
void obs_output_caption(obs_output_t *output,
			const struct obs_source_cea_708 *captions)
{
	if (!obs_output_valid(output, "obs_output_caption"))
		return;
	if (!captions)
		return;

	pthread_mutex_lock(&output->caption_mutex);
	for (size_t i = 0; i < captions->packets; i++) {
		circlebuf_push_back(&output->caption_data,
				    captions->data + (i * 3),
				    3 * sizeof(uint8_t));
	}
	pthread_mutex_unlock(&output->caption_mutex);
}
|
|
|
|
|
2016-11-17 05:25:23 -08:00
|
|
|
/* Allocates a new caption node holding at most CAPTION_LINE_BYTES of
 * 'text' (truncated via snprintf) and appends it to the singly-linked
 * caption list: sets *head when the list is empty, otherwise links it
 * after 'tail'.  Returns the new node (the new tail). */
static struct caption_text *caption_text_new(const char *text, size_t bytes,
					     struct caption_text *tail,
					     struct caption_text **head,
					     double display_duration)
{
	struct caption_text *next = bzalloc(sizeof(struct caption_text));
	/* +1 for the NUL terminator; snprintf always terminates */
	snprintf(&next->text[0], CAPTION_LINE_BYTES + 1, "%.*s", (int)bytes,
		 text);
	next->display_duration = display_duration;

	if (!*head) {
		*head = next;
	} else {
		tail->next = next;
	}

	return next;
}
|
|
|
|
|
|
|
|
/* Queues caption text with the default display duration (2 seconds);
 * convenience wrapper around obs_output_output_caption_text2. */
void obs_output_output_caption_text1(obs_output_t *output, const char *text)
{
	if (!obs_output_valid(output, "obs_output_output_caption_text1"))
		return;
	obs_output_output_caption_text2(output, text, 2.0f);
}
|
|
|
|
|
|
|
|
/* Queues caption text with an explicit display duration (seconds).
 * Only valid while the output is active; the text is appended to the
 * caption list under the caption mutex.
 *
 * Fix: guard against a NULL 'text' before strlen() — calling strlen(NULL)
 * is undefined behavior. */
void obs_output_output_caption_text2(obs_output_t *output, const char *text,
				     double display_duration)
{
	if (!obs_output_valid(output, "obs_output_output_caption_text2"))
		return;
	if (!text)
		return;
	if (!active(output))
		return;

	// split text into 32 character strings
	int size = (int)strlen(text);
	blog(LOG_DEBUG, "Caption text: %s", text);

	pthread_mutex_lock(&output->caption_mutex);

	output->caption_tail =
		caption_text_new(text, size, output->caption_tail,
				 &output->caption_head, display_duration);

	pthread_mutex_unlock(&output->caption_mutex);
}
|
2017-01-24 15:30:01 -08:00
|
|
|
|
|
|
|
/* Returns the output's congestion estimate clamped to [0, 1], or 0 when
 * the output is invalid or does not implement get_congestion. */
float obs_output_get_congestion(obs_output_t *output)
{
	if (!obs_output_valid(output, "obs_output_get_congestion"))
		return 0;
	if (!output->info.get_congestion)
		return 0;

	float congestion = output->info.get_congestion(output->context.data);

	/* clamp plugin-reported value to the documented [0, 1] range */
	if (congestion < 0.0f)
		congestion = 0.0f;
	if (congestion > 1.0f)
		congestion = 1.0f;

	return congestion;
}
|
2017-04-24 03:23:34 -07:00
|
|
|
|
|
|
|
/* Returns the time the output took to connect (in milliseconds), or -1
 * when unknown/unsupported. */
int obs_output_get_connect_time_ms(obs_output_t *output)
{
	if (!obs_output_valid(output, "obs_output_get_connect_time_ms"))
		return -1;

	return output->info.get_connect_time_ms
		       ? output->info.get_connect_time_ms(output->context.data)
		       : -1;
}
|
2017-05-12 22:02:50 -07:00
|
|
|
|
2017-05-15 03:04:11 -07:00
|
|
|
/* Returns the most relevant last-error message for the output:
 * the output's own message if set, otherwise the video encoder's,
 * otherwise the first audio encoder's.  Returns NULL when none is set.
 * The returned pointer is owned by the output/encoder; do not free. */
const char *obs_output_get_last_error(obs_output_t *output)
{
	if (!obs_output_valid(output, "obs_output_get_last_error"))
		return NULL;

	if (output->last_error_message) {
		return output->last_error_message;
	} else {
		/* fall back to encoder errors when the output itself has
		 * no message */
		obs_encoder_t *vencoder = output->video_encoder;
		if (vencoder && vencoder->last_error_message) {
			return vencoder->last_error_message;
		}

		for (size_t i = 0; i < MAX_AUDIO_MIXES; i++) {
			obs_encoder_t *aencoder = output->audio_encoders[i];
			if (aencoder && aencoder->last_error_message) {
				return aencoder->last_error_message;
			}
		}
	}

	return NULL;
}
|
|
|
|
|
|
|
|
/* Replaces the output's stored last-error message with a copy of
 * 'message'; passing NULL clears it.
 *
 * Cleanup: the previous `if (ptr) bfree(ptr)` guard was redundant —
 * libobs' bfree() is NULL-safe — so free unconditionally and assign via
 * a single ternary. */
void obs_output_set_last_error(obs_output_t *output, const char *message)
{
	if (!obs_output_valid(output, "obs_output_set_last_error"))
		return;

	bfree(output->last_error_message);
	output->last_error_message = message ? bstrdup(message) : NULL;
}
|
|
|
|
|
2017-05-12 22:02:50 -07:00
|
|
|
/* Returns true while the output is attempting to reconnect. */
bool obs_output_reconnecting(const obs_output_t *output)
{
	return obs_output_valid(output, "obs_output_reconnecting") &&
	       reconnecting(output);
}
|
2017-07-12 22:54:04 -07:00
|
|
|
|
|
|
|
const char *obs_output_get_supported_video_codecs(const obs_output_t *output)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, __FUNCTION__)
|
|
|
|
? output->info.encoded_video_codecs
|
|
|
|
: NULL;
|
2017-07-12 22:54:04 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
const char *obs_output_get_supported_audio_codecs(const obs_output_t *output)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_output_valid(output, __FUNCTION__)
|
|
|
|
? output->info.encoded_audio_codecs
|
|
|
|
: NULL;
|
2017-07-12 22:54:04 -07:00
|
|
|
}
|