2013-09-30 19:37:13 -07:00
|
|
|
/******************************************************************************
|
2014-02-22 19:14:19 -08:00
|
|
|
Copyright (C) 2013-2014 by Hugh Bailey <obs.jim@gmail.com>
|
2013-09-30 19:37:13 -07:00
|
|
|
|
|
|
|
This program is free software: you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
2013-12-02 21:24:38 -08:00
|
|
|
the Free Software Foundation, either version 2 of the License, or
|
2013-09-30 19:37:13 -07:00
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
******************************************************************************/
|
|
|
|
|
2014-02-14 14:56:01 -08:00
|
|
|
#include <inttypes.h>
|
2017-10-08 03:15:28 -07:00
|
|
|
#include <math.h>
|
2014-02-14 14:56:01 -08:00
|
|
|
|
2013-10-29 23:54:43 -07:00
|
|
|
#include "media-io/format-conversion.h"
|
2014-02-18 12:37:56 -08:00
|
|
|
#include "media-io/video-frame.h"
|
2014-05-20 07:26:18 -07:00
|
|
|
#include "media-io/audio-io.h"
|
2014-03-16 18:26:46 -07:00
|
|
|
#include "util/threading.h"
|
2013-10-24 00:57:55 -07:00
|
|
|
#include "util/platform.h"
|
2013-12-30 09:09:20 -08:00
|
|
|
#include "callback/calldata.h"
|
2013-11-26 21:26:14 -08:00
|
|
|
#include "graphics/matrix3.h"
|
|
|
|
#include "graphics/vec3.h"
|
2013-10-24 00:57:55 -07:00
|
|
|
|
2013-09-30 19:37:13 -07:00
|
|
|
#include "obs.h"
|
2014-01-26 17:48:14 -08:00
|
|
|
#include "obs-internal.h"
|
2013-09-30 19:37:13 -07:00
|
|
|
|
2015-10-16 18:49:45 -07:00
|
|
|
static inline bool data_valid(const struct obs_source *source, const char *f)
|
2014-05-04 16:20:11 -07:00
|
|
|
{
|
2015-10-21 06:28:03 -07:00
|
|
|
return obs_source_valid(source, f) && source->context.data;
|
2014-05-04 16:20:11 -07:00
|
|
|
}
|
|
|
|
|
2016-03-15 20:39:36 -07:00
|
|
|
static inline bool deinterlacing_enabled(const struct obs_source *source)
|
|
|
|
{
|
|
|
|
return source->deinterlace_mode != OBS_DEINTERLACE_MODE_DISABLE;
|
|
|
|
}
|
|
|
|
|
2017-12-25 12:20:54 -08:00
|
|
|
struct obs_source_info *get_source_info(const char *id)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2015-12-29 15:25:45 -08:00
|
|
|
for (size_t i = 0; i < obs->source_types.num; i++) {
|
|
|
|
struct obs_source_info *info = &obs->source_types.array[i];
|
2013-12-20 16:23:19 -08:00
|
|
|
if (strcmp(info->id, id) == 0)
|
2013-09-30 19:37:13 -07:00
|
|
|
return info;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-03-01 04:54:55 -08:00
|
|
|
/* Declarations of every signal emitted by a source's signal handler.
 * These are registered onto each new source's context in
 * obs_source_init_context() via signal_handler_add_array(), which
 * requires the NULL terminator at the end of the list. */
static const char *source_signals[] = {
	/* lifecycle */
	"void destroy(ptr source)",
	"void remove(ptr source)",
	"void save(ptr source)",
	"void load(ptr source)",
	"void activate(ptr source)",
	"void deactivate(ptr source)",
	"void show(ptr source)",
	"void hide(ptr source)",
	/* audio state -- "in out" parameters may be modified by handlers */
	"void mute(ptr source, bool muted)",
	"void push_to_mute_changed(ptr source, bool enabled)",
	"void push_to_mute_delay(ptr source, int delay)",
	"void push_to_talk_changed(ptr source, bool enabled)",
	"void push_to_talk_delay(ptr source, int delay)",
	"void enable(ptr source, bool enabled)",
	"void rename(ptr source, string new_name, string prev_name)",
	"void volume(ptr source, in out float volume)",
	"void update_properties(ptr source)",
	"void update_flags(ptr source, int flags)",
	"void audio_sync(ptr source, int out int offset)",
	"void audio_mixers(ptr source, in out int mixers)",
	"void audio_activate(ptr source)",
	"void audio_deactivate(ptr source)",
	/* filters */
	"void filter_add(ptr source, ptr filter)",
	"void filter_remove(ptr source, ptr filter)",
	"void reorder_filters(ptr source)",
	/* transitions */
	"void transition_start(ptr source)",
	"void transition_video_stop(ptr source)",
	"void transition_stop(ptr source)",
	NULL,
};
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
/* Initializes the shared context data (settings, name, hotkeys, signal
 * and procedure handlers) for a source, then registers the standard
 * source signal declarations.  Returns false on failure. */
bool obs_source_init_context(struct obs_source *source, obs_data_t *settings,
			     const char *name, obs_data_t *hotkey_data,
			     bool private)
{
	bool ok = obs_context_data_init(&source->context, OBS_OBJ_TYPE_SOURCE,
					settings, name, hotkey_data, private);
	if (!ok)
		return false;

	return signal_handler_add_array(source->context.signals,
					source_signals);
}
|
|
|
|
|
2015-12-29 15:25:45 -08:00
|
|
|
const char *obs_source_get_display_name(const char *id)
|
2013-12-30 05:56:39 -08:00
|
|
|
{
|
2015-12-29 15:25:45 -08:00
|
|
|
const struct obs_source_info *info = get_source_info(id);
|
2015-09-16 01:30:51 -07:00
|
|
|
return (info != NULL) ? info->get_name(info->type_data) : NULL;
|
2013-12-30 05:56:39 -08:00
|
|
|
}
|
|
|
|
|
2015-12-17 04:28:35 -08:00
|
|
|
static void allocate_audio_output_buffer(struct obs_source *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
size_t size = sizeof(float) * AUDIO_OUTPUT_FRAMES * MAX_AUDIO_CHANNELS *
|
|
|
|
MAX_AUDIO_MIXES;
|
2015-12-17 04:28:35 -08:00
|
|
|
float *ptr = bzalloc(size);
|
|
|
|
|
|
|
|
for (size_t mix = 0; mix < MAX_AUDIO_MIXES; mix++) {
|
|
|
|
size_t mix_pos = mix * AUDIO_OUTPUT_FRAMES * MAX_AUDIO_CHANNELS;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < MAX_AUDIO_CHANNELS; i++) {
|
|
|
|
source->audio_output_buf[mix][i] =
|
|
|
|
ptr + mix_pos + AUDIO_OUTPUT_FRAMES * i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-21 14:35:40 -07:00
|
|
|
static void allocate_audio_mix_buffer(struct obs_source *source)
|
|
|
|
{
|
|
|
|
size_t size = sizeof(float) * AUDIO_OUTPUT_FRAMES * MAX_AUDIO_CHANNELS;
|
|
|
|
float *ptr = bzalloc(size);
|
|
|
|
|
|
|
|
for (size_t i = 0; i < MAX_AUDIO_CHANNELS; i++) {
|
|
|
|
source->audio_mix_buf[i] = ptr + AUDIO_OUTPUT_FRAMES * i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-15 20:39:36 -07:00
|
|
|
static inline bool is_async_video_source(const struct obs_source *source)
|
|
|
|
{
|
|
|
|
return (source->info.output_flags & OBS_SOURCE_ASYNC_VIDEO) ==
|
2019-06-22 22:13:45 -07:00
|
|
|
OBS_SOURCE_ASYNC_VIDEO;
|
2016-03-15 20:39:36 -07:00
|
|
|
}
|
|
|
|
|
2015-12-17 06:46:10 -08:00
|
|
|
static inline bool is_audio_source(const struct obs_source *source)
|
|
|
|
{
|
|
|
|
return source->info.output_flags & OBS_SOURCE_AUDIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool is_composite_source(const struct obs_source *source)
|
|
|
|
{
|
|
|
|
return source->info.output_flags & OBS_SOURCE_COMPOSITE;
|
|
|
|
}
|
|
|
|
|
2016-03-15 20:39:36 -07:00
|
|
|
extern char *find_libobs_data_file(const char *file);
|
|
|
|
|
2013-12-20 10:56:01 -08:00
|
|
|
/* internal initialization */
|
2019-08-25 19:16:20 -07:00
|
|
|
static bool obs_source_init(struct obs_source *source)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2015-03-07 08:36:38 -08:00
|
|
|
pthread_mutexattr_t attr;
|
|
|
|
|
2014-02-20 14:53:16 -08:00
|
|
|
source->user_volume = 1.0f;
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
source->volume = 1.0f;
|
2014-02-20 15:16:25 -08:00
|
|
|
source->sync_offset = 0;
|
2017-10-08 03:15:28 -07:00
|
|
|
source->balance = 0.5f;
|
2019-09-19 23:37:29 -07:00
|
|
|
source->audio_active = true;
|
2013-10-24 00:57:55 -07:00
|
|
|
pthread_mutex_init_value(&source->filter_mutex);
|
2015-01-03 23:19:09 -08:00
|
|
|
pthread_mutex_init_value(&source->async_mutex);
|
2013-10-24 00:57:55 -07:00
|
|
|
pthread_mutex_init_value(&source->audio_mutex);
|
2015-12-17 04:28:35 -08:00
|
|
|
pthread_mutex_init_value(&source->audio_buf_mutex);
|
2016-01-07 19:48:36 -08:00
|
|
|
pthread_mutex_init_value(&source->audio_cb_mutex);
|
2014-01-28 14:45:30 -08:00
|
|
|
|
2015-03-07 08:36:38 -08:00
|
|
|
if (pthread_mutexattr_init(&attr) != 0)
|
|
|
|
return false;
|
|
|
|
if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) != 0)
|
|
|
|
return false;
|
|
|
|
if (pthread_mutex_init(&source->filter_mutex, &attr) != 0)
|
2013-10-24 00:57:55 -07:00
|
|
|
return false;
|
2015-12-17 04:28:35 -08:00
|
|
|
if (pthread_mutex_init(&source->audio_buf_mutex, NULL) != 0)
|
|
|
|
return false;
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
if (pthread_mutex_init(&source->audio_actions_mutex, NULL) != 0)
|
|
|
|
return false;
|
2016-01-07 19:48:36 -08:00
|
|
|
if (pthread_mutex_init(&source->audio_cb_mutex, NULL) != 0)
|
|
|
|
return false;
|
2013-10-24 00:57:55 -07:00
|
|
|
if (pthread_mutex_init(&source->audio_mutex, NULL) != 0)
|
|
|
|
return false;
|
2015-01-03 23:19:09 -08:00
|
|
|
if (pthread_mutex_init(&source->async_mutex, NULL) != 0)
|
2013-10-24 00:57:55 -07:00
|
|
|
return false;
|
2013-09-30 19:37:13 -07:00
|
|
|
|
2015-12-17 06:46:10 -08:00
|
|
|
if (is_audio_source(source) || is_composite_source(source))
|
2015-12-17 04:28:35 -08:00
|
|
|
allocate_audio_output_buffer(source);
|
2019-08-21 14:35:40 -07:00
|
|
|
if (source->info.audio_mix)
|
|
|
|
allocate_audio_mix_buffer(source);
|
2015-12-17 04:00:14 -08:00
|
|
|
|
2016-04-13 09:39:24 -07:00
|
|
|
if (source->info.type == OBS_SOURCE_TYPE_TRANSITION) {
|
|
|
|
if (!obs_transition_init(source))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
source->control = bzalloc(sizeof(obs_weak_source_t));
|
|
|
|
source->deinterlace_top_first = true;
|
|
|
|
source->control->source = source;
|
2016-12-21 17:13:19 -08:00
|
|
|
source->audio_mixers = 0xFF;
|
2016-04-13 09:39:24 -07:00
|
|
|
|
2019-08-25 19:17:20 -07:00
|
|
|
source->private_settings = obs_data_create();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void obs_source_init_finalize(struct obs_source *source)
|
|
|
|
{
|
2015-12-17 06:46:10 -08:00
|
|
|
if (is_audio_source(source)) {
|
2015-12-17 04:00:14 -08:00
|
|
|
pthread_mutex_lock(&obs->data.audio_sources_mutex);
|
|
|
|
|
|
|
|
source->next_audio_source = obs->data.first_audio_source;
|
2019-06-22 22:13:45 -07:00
|
|
|
source->prev_next_audio_source = &obs->data.first_audio_source;
|
2015-12-17 04:00:14 -08:00
|
|
|
if (obs->data.first_audio_source)
|
|
|
|
obs->data.first_audio_source->prev_next_audio_source =
|
|
|
|
&source->next_audio_source;
|
|
|
|
obs->data.first_audio_source = source;
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&obs->data.audio_sources_mutex);
|
2013-10-24 00:57:55 -07:00
|
|
|
}
|
2013-10-18 20:25:13 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_context_data_insert(&source->context, &obs->data.sources_mutex,
|
|
|
|
&obs->data.first_source);
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static bool obs_source_hotkey_mute(void *data, obs_hotkey_pair_id id,
|
|
|
|
obs_hotkey_t *key, bool pressed)
|
2015-04-30 18:22:12 -07:00
|
|
|
{
|
|
|
|
UNUSED_PARAMETER(id);
|
|
|
|
UNUSED_PARAMETER(key);
|
|
|
|
|
|
|
|
struct obs_source *source = data;
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
if (!pressed || obs_source_muted(source))
|
|
|
|
return false;
|
2015-04-30 18:22:12 -07:00
|
|
|
|
|
|
|
obs_source_set_muted(source, true);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static bool obs_source_hotkey_unmute(void *data, obs_hotkey_pair_id id,
|
|
|
|
obs_hotkey_t *key, bool pressed)
|
2015-04-30 18:22:12 -07:00
|
|
|
{
|
|
|
|
UNUSED_PARAMETER(id);
|
|
|
|
UNUSED_PARAMETER(key);
|
|
|
|
|
|
|
|
struct obs_source *source = data;
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
if (!pressed || !obs_source_muted(source))
|
|
|
|
return false;
|
2015-04-30 18:22:12 -07:00
|
|
|
|
|
|
|
obs_source_set_muted(source, false);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static void obs_source_hotkey_push_to_mute(void *data, obs_hotkey_id id,
|
|
|
|
obs_hotkey_t *key, bool pressed)
|
2015-04-30 18:22:12 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
struct audio_action action = {.timestamp = os_gettime_ns(),
|
|
|
|
.type = AUDIO_ACTION_PTM,
|
|
|
|
.set = pressed};
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
|
2015-04-30 18:22:12 -07:00
|
|
|
UNUSED_PARAMETER(id);
|
|
|
|
UNUSED_PARAMETER(key);
|
|
|
|
|
|
|
|
struct obs_source *source = data;
|
|
|
|
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
pthread_mutex_lock(&source->audio_actions_mutex);
|
|
|
|
da_push_back(source->audio_actions, &action);
|
|
|
|
pthread_mutex_unlock(&source->audio_actions_mutex);
|
|
|
|
|
|
|
|
source->user_push_to_mute_pressed = pressed;
|
2015-04-30 18:22:12 -07:00
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static void obs_source_hotkey_push_to_talk(void *data, obs_hotkey_id id,
|
|
|
|
obs_hotkey_t *key, bool pressed)
|
2015-04-30 18:22:12 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
struct audio_action action = {.timestamp = os_gettime_ns(),
|
|
|
|
.type = AUDIO_ACTION_PTT,
|
|
|
|
.set = pressed};
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
|
2015-04-30 18:22:12 -07:00
|
|
|
UNUSED_PARAMETER(id);
|
|
|
|
UNUSED_PARAMETER(key);
|
|
|
|
|
|
|
|
struct obs_source *source = data;
|
|
|
|
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
pthread_mutex_lock(&source->audio_actions_mutex);
|
|
|
|
da_push_back(source->audio_actions, &action);
|
|
|
|
pthread_mutex_unlock(&source->audio_actions_mutex);
|
|
|
|
|
|
|
|
source->user_push_to_talk_pressed = pressed;
|
2015-04-30 18:22:12 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void obs_source_init_audio_hotkeys(struct obs_source *source)
|
|
|
|
{
|
2015-07-03 09:21:43 -07:00
|
|
|
if (!(source->info.output_flags & OBS_SOURCE_AUDIO) ||
|
|
|
|
source->info.type != OBS_SOURCE_TYPE_INPUT) {
|
2019-06-22 22:13:45 -07:00
|
|
|
source->mute_unmute_key = OBS_INVALID_HOTKEY_ID;
|
2015-04-30 18:22:12 -07:00
|
|
|
source->push_to_talk_key = OBS_INVALID_HOTKEY_ID;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
source->mute_unmute_key = obs_hotkey_pair_register_source(
|
|
|
|
source, "libobs.mute", obs->hotkeys.mute, "libobs.unmute",
|
|
|
|
obs->hotkeys.unmute, obs_source_hotkey_mute,
|
|
|
|
obs_source_hotkey_unmute, source, source);
|
2015-04-30 18:22:12 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
source->push_to_mute_key = obs_hotkey_register_source(
|
|
|
|
source, "libobs.push-to-mute", obs->hotkeys.push_to_mute,
|
|
|
|
obs_source_hotkey_push_to_mute, source);
|
2015-04-30 18:22:12 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
source->push_to_talk_key = obs_hotkey_register_source(
|
|
|
|
source, "libobs.push-to-talk", obs->hotkeys.push_to_talk,
|
|
|
|
obs_source_hotkey_push_to_talk, source);
|
2015-04-30 18:22:12 -07:00
|
|
|
}
|
|
|
|
|
2019-09-20 00:13:51 -07:00
|
|
|
/* Common back-end for all obs_source_create* entry points.
 *
 * Creates a source of the registered type `id`.  If the type is unknown the
 * source is still created as a placeholder (owning a copy of the id string)
 * so user scene-collection data is not lost.  `private` sources are excluded
 * from enumeration/search and get no audio hotkeys or create signal.
 * Returns NULL only if context/common initialization fails. */
static obs_source_t *
obs_source_create_internal(const char *id, const char *name,
			   obs_data_t *settings, obs_data_t *hotkey_data,
			   bool private, uint32_t last_obs_ver)
{
	struct obs_source *source = bzalloc(sizeof(struct obs_source));

	const struct obs_source_info *info = get_source_info(id);
	if (!info) {
		blog(LOG_ERROR, "Source ID '%s' not found", id);

		/* unknown type: keep a heap copy of the id so the source can
		 * survive as a placeholder; freed in obs_source_destroy */
		source->info.id = bstrdup(id);
		source->owns_info_id = true;
	} else {
		source->info = *info;

		/* Always mark filters as private so they aren't found by
		 * source enum/search functions.
		 *
		 * XXX: Fix design flaws with filters */
		if (info->type == OBS_SOURCE_TYPE_FILTER)
			private
			= true;
	}

	/* invalidate hotkey ids before any early-exit path can unregister */
	source->mute_unmute_key = OBS_INVALID_HOTKEY_PAIR_ID;
	source->push_to_mute_key = OBS_INVALID_HOTKEY_ID;
	source->push_to_talk_key = OBS_INVALID_HOTKEY_ID;
	source->last_obs_ver = last_obs_ver;

	if (!obs_source_init_context(source, settings, name, hotkey_data,
				     private))
		goto fail;

	/* apply type defaults before the plugin's create() runs; note both
	 * get_defaults and get_defaults2 are applied when present */
	if (info) {
		if (info->get_defaults) {
			info->get_defaults(source->context.settings);
		}
		if (info->get_defaults2) {
			info->get_defaults2(info->type_data,
					    source->context.settings);
		}
	}

	if (!obs_source_init(source))
		goto fail;

	if (!private)
		obs_source_init_audio_hotkeys(source);

	/* allow the source to be created even if creation fails so that the
	 * user's data doesn't become lost */
	if (info && info->create)
		source->context.data =
			info->create(source->context.settings, source);
	if ((!info || info->create) && !source->context.data)
		blog(LOG_ERROR, "Failed to create source '%s'!", name);

	blog(LOG_DEBUG, "%ssource '%s' (%s) created", private ? "private " : "",
	     name, id);

	source->flags = source->default_flags;
	source->enabled = true;

	/* private sources are invisible to the front end, so no signal */
	if (!private) {
		obs_source_dosignal(source, "source_create", NULL);
	}

	obs_source_init_finalize(source);
	return source;

fail:
	blog(LOG_ERROR, "obs_source_create failed");
	obs_source_destroy(source);
	return NULL;
}
|
|
|
|
|
2016-01-09 13:27:16 -08:00
|
|
|
/* Public constructor for a regular (enumerable) source, tagged with the
 * current libobs API version.  Thin wrapper over the internal creator. */
obs_source_t *obs_source_create(const char *id, const char *name,
				obs_data_t *settings, obs_data_t *hotkey_data)
{
	return obs_source_create_internal(id, name, settings, hotkey_data,
					  false, LIBOBS_API_VER);
}
|
|
|
|
|
|
|
|
obs_source_t *obs_source_create_private(const char *id, const char *name,
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_data_t *settings)
|
2016-01-09 13:27:16 -08:00
|
|
|
{
|
2019-09-20 00:13:51 -07:00
|
|
|
return obs_source_create_internal(id, name, settings, NULL, true,
|
|
|
|
LIBOBS_API_VER);
|
|
|
|
}
|
|
|
|
|
|
|
|
obs_source_t *obs_source_create_set_last_ver(const char *id, const char *name,
|
|
|
|
obs_data_t *settings,
|
|
|
|
obs_data_t *hotkey_data,
|
|
|
|
uint32_t last_obs_ver)
|
|
|
|
{
|
|
|
|
return obs_source_create_internal(id, name, settings, hotkey_data,
|
|
|
|
false, last_obs_ver);
|
2016-01-09 13:27:16 -08:00
|
|
|
}
|
|
|
|
|
2017-03-12 19:54:55 -07:00
|
|
|
/* Produces a filter name unique within `dst` by appending " 2", " 3", ...
 * to `name` until no existing filter matches.  Caller owns the returned
 * heap string (free with bfree). */
static char *get_new_filter_name(obs_source_t *dst, const char *name)
{
	struct dstr candidate = {0};
	int suffix = 0;

	dstr_copy(&candidate, name);

	for (;;) {
		obs_source_t *clash =
			obs_source_get_filter_by_name(dst, candidate.array);
		if (!clash)
			break;

		/* name taken: drop the lookup ref and try the next suffix */
		obs_source_release(clash);
		dstr_printf(&candidate, "%s %d", name, ++suffix + 1);
	}

	return candidate.array;
}
|
|
|
|
|
2016-01-12 16:30:15 -08:00
|
|
|
static void duplicate_filters(obs_source_t *dst, obs_source_t *src,
|
2019-06-22 22:13:45 -07:00
|
|
|
bool private)
|
2016-01-12 16:30:15 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
DARRAY(obs_source_t *) filters;
|
2016-01-12 16:30:15 -08:00
|
|
|
|
|
|
|
da_init(filters);
|
|
|
|
|
|
|
|
pthread_mutex_lock(&src->filter_mutex);
|
|
|
|
for (size_t i = 0; i < src->filters.num; i++)
|
|
|
|
obs_source_addref(src->filters.array[i]);
|
|
|
|
da_copy(filters, src->filters);
|
|
|
|
pthread_mutex_unlock(&src->filter_mutex);
|
|
|
|
|
|
|
|
for (size_t i = filters.num; i > 0; i--) {
|
|
|
|
obs_source_t *src_filter = filters.array[i - 1];
|
2019-06-22 22:13:45 -07:00
|
|
|
char *new_name =
|
|
|
|
get_new_filter_name(dst, src_filter->context.name);
|
2017-11-12 01:38:56 -08:00
|
|
|
bool enabled = obs_source_enabled(src_filter);
|
2017-03-12 19:54:55 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_source_t *dst_filter =
|
|
|
|
obs_source_duplicate(src_filter, new_name, private);
|
2017-11-12 01:38:56 -08:00
|
|
|
obs_source_set_enabled(dst_filter, enabled);
|
2016-01-12 16:30:15 -08:00
|
|
|
|
2017-03-12 19:54:55 -07:00
|
|
|
bfree(new_name);
|
2016-01-12 16:30:15 -08:00
|
|
|
obs_source_filter_add(dst, dst_filter);
|
|
|
|
obs_source_release(dst_filter);
|
|
|
|
obs_source_release(src_filter);
|
|
|
|
}
|
|
|
|
|
|
|
|
da_free(filters);
|
|
|
|
}
|
|
|
|
|
2017-03-12 19:54:55 -07:00
|
|
|
/* Public API: copy all filters from `src` to `dst`; copies are private iff
 * the destination source itself is private. */
void obs_source_copy_filters(obs_source_t *dst, obs_source_t *src)
{
	if (!obs_source_valid(dst, "obs_source_copy_filters") ||
	    !obs_source_valid(src, "obs_source_copy_filters"))
		return;

	duplicate_filters(dst, src, dst->context.private);
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_source_t *obs_source_duplicate(obs_source_t *source, const char *new_name,
|
|
|
|
bool create_private)
|
2016-01-12 16:30:15 -08:00
|
|
|
{
|
|
|
|
obs_source_t *new_source;
|
|
|
|
obs_data_t *settings;
|
|
|
|
|
|
|
|
if (!obs_source_valid(source, "obs_source_duplicate"))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if ((source->info.output_flags & OBS_SOURCE_DO_NOT_DUPLICATE) != 0) {
|
|
|
|
obs_source_addref(source);
|
|
|
|
return source;
|
|
|
|
}
|
|
|
|
|
2016-07-01 15:20:27 -07:00
|
|
|
if (source->info.type == OBS_SOURCE_TYPE_SCENE) {
|
|
|
|
obs_scene_t *scene = obs_scene_from_source(source);
|
2018-07-15 18:58:28 -07:00
|
|
|
if (!scene)
|
|
|
|
scene = obs_group_from_source(source);
|
|
|
|
if (!scene)
|
|
|
|
return NULL;
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_scene_t *new_scene = obs_scene_duplicate(
|
|
|
|
scene, new_name,
|
|
|
|
create_private ? OBS_SCENE_DUP_PRIVATE_COPY
|
|
|
|
: OBS_SCENE_DUP_COPY);
|
2016-11-05 11:51:06 -07:00
|
|
|
obs_source_t *new_source = obs_scene_get_source(new_scene);
|
|
|
|
return new_source;
|
2016-07-01 15:20:27 -07:00
|
|
|
}
|
|
|
|
|
2016-01-12 16:30:15 -08:00
|
|
|
settings = obs_data_create();
|
|
|
|
obs_data_apply(settings, source->context.settings);
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
new_source = create_private
|
|
|
|
? obs_source_create_private(source->info.id,
|
|
|
|
new_name, settings)
|
|
|
|
: obs_source_create(source->info.id, new_name,
|
|
|
|
settings, NULL);
|
2016-01-12 16:30:15 -08:00
|
|
|
|
|
|
|
new_source->audio_mixers = source->audio_mixers;
|
|
|
|
new_source->sync_offset = source->sync_offset;
|
|
|
|
new_source->user_volume = source->user_volume;
|
|
|
|
new_source->user_muted = source->user_muted;
|
|
|
|
new_source->volume = source->volume;
|
|
|
|
new_source->muted = source->muted;
|
|
|
|
new_source->flags = source->flags;
|
|
|
|
|
2017-11-30 05:04:39 -08:00
|
|
|
obs_data_apply(new_source->private_settings, source->private_settings);
|
|
|
|
|
2016-01-12 16:30:15 -08:00
|
|
|
if (source->info.type != OBS_SOURCE_TYPE_FILTER)
|
|
|
|
duplicate_filters(new_source, source, create_private);
|
|
|
|
|
|
|
|
obs_data_release(settings);
|
|
|
|
return new_source;
|
|
|
|
}
|
|
|
|
|
2014-08-02 01:04:45 -07:00
|
|
|
/* Initializes `frame` for the given format/size, allocating plane buffers
 * via video_frame_init and copying the resulting plane pointers and line
 * sizes into the source-frame structure. */
void obs_source_frame_init(struct obs_source_frame *frame,
			   enum video_format format, uint32_t width,
			   uint32_t height)
{
	struct video_frame planes;

	if (!obs_ptr_valid(frame, "obs_source_frame_init"))
		return;

	video_frame_init(&planes, format, width, height);

	frame->format = format;
	frame->width = width;
	frame->height = height;

	for (size_t plane = 0; plane < MAX_AV_PLANES; plane++) {
		frame->data[plane] = planes.data[plane];
		frame->linesize[plane] = planes.linesize[plane];
	}
}
|
|
|
|
|
2015-03-07 08:47:49 -08:00
|
|
|
static inline void obs_source_frame_decref(struct obs_source_frame *frame)
|
|
|
|
{
|
|
|
|
if (os_atomic_dec_long(&frame->refs) == 0)
|
|
|
|
obs_source_frame_destroy(frame);
|
|
|
|
}
|
|
|
|
|
2015-03-17 18:23:04 -07:00
|
|
|
static bool obs_source_filter_remove_refless(obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_source_t *filter);
|
2015-03-17 18:23:04 -07:00
|
|
|
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
/* Final teardown of a source once its last strong reference is gone.
 *
 * The teardown order is deliberate: detach from global/audio/filter lists
 * and signal destruction first (while the plugin data is still alive), then
 * call the plugin's destroy(), then free graphics resources inside a
 * graphics context, and finally release audio buffers, arrays, mutexes, and
 * the context itself. */
void obs_source_destroy(struct obs_source *source)
{
	size_t i;

	if (!obs_source_valid(source, "obs_source_destroy"))
		return;

	/* release transition child references before anything else */
	if (source->info.type == OBS_SOURCE_TYPE_TRANSITION)
		obs_transition_clear(source);

	/* unlink from the global intrusive audio-source list */
	pthread_mutex_lock(&obs->data.audio_sources_mutex);
	if (source->prev_next_audio_source) {
		*source->prev_next_audio_source = source->next_audio_source;
		if (source->next_audio_source)
			source->next_audio_source->prev_next_audio_source =
				source->prev_next_audio_source;
	}
	pthread_mutex_unlock(&obs->data.audio_sources_mutex);

	/* detach from parent if this source is itself a filter */
	if (source->filter_parent)
		obs_source_filter_remove_refless(source->filter_parent, source);

	while (source->filters.num)
		obs_source_filter_remove(source, source->filters.array[0]);

	obs_context_data_remove(&source->context);

	blog(LOG_DEBUG, "%ssource '%s' destroyed",
	     source->context.private ? "private " : "", source->context.name);

	obs_source_dosignal(source, "source_destroy", "destroy");

	/* plugin data is destroyed before its graphics/audio resources */
	if (source->context.data) {
		source->info.destroy(source->context.data);
		source->context.data = NULL;
	}

	audio_monitor_destroy(source->monitor);

	obs_hotkey_unregister(source->push_to_talk_key);
	obs_hotkey_unregister(source->push_to_mute_key);
	obs_hotkey_pair_unregister(source->mute_unmute_key);

	for (i = 0; i < source->async_cache.num; i++)
		obs_source_frame_decref(source->async_cache.array[i].frame);

	/* GPU objects must be destroyed inside a graphics context */
	gs_enter_context(obs->video.graphics);
	if (source->async_texrender)
		gs_texrender_destroy(source->async_texrender);
	if (source->async_prev_texrender)
		gs_texrender_destroy(source->async_prev_texrender);
	for (size_t c = 0; c < MAX_AV_PLANES; c++) {
		gs_texture_destroy(source->async_textures[c]);
		gs_texture_destroy(source->async_prev_textures[c]);
	}
	if (source->filter_texrender)
		gs_texrender_destroy(source->filter_texrender);
	gs_leave_context();

	for (i = 0; i < MAX_AV_PLANES; i++)
		bfree(source->audio_data.data[i]);
	for (i = 0; i < MAX_AUDIO_CHANNELS; i++)
		circlebuf_free(&source->audio_input_buf[i]);
	audio_resampler_destroy(source->resampler);
	/* audio_output_buf/audio_mix_buf are single allocations indexed via
	 * [0][0] / [0]; freeing the first pointer frees the whole block */
	bfree(source->audio_output_buf[0][0]);
	bfree(source->audio_mix_buf[0]);

	obs_source_frame_destroy(source->async_preload_frame);

	if (source->info.type == OBS_SOURCE_TYPE_TRANSITION)
		obs_transition_free(source);

	da_free(source->audio_actions);
	da_free(source->audio_cb_list);
	da_free(source->async_cache);
	da_free(source->async_frames);
	da_free(source->filters);
	pthread_mutex_destroy(&source->filter_mutex);
	pthread_mutex_destroy(&source->audio_actions_mutex);
	pthread_mutex_destroy(&source->audio_buf_mutex);
	pthread_mutex_destroy(&source->audio_cb_mutex);
	pthread_mutex_destroy(&source->audio_mutex);
	pthread_mutex_destroy(&source->async_mutex);
	obs_data_release(source->private_settings);
	obs_context_data_free(&source->context);

	/* only set when the source id was unknown at creation time */
	if (source->owns_info_id)
		bfree((void *)source->info.id);

	bfree(source);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Adds a strong reference to the source; NULL is tolerated as a no-op. */
void obs_source_addref(obs_source_t *source)
{
	if (source)
		obs_ref_addref(&source->control->ref);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Drops a strong reference; destroys the source (and releases the weak
 * control block's implicit ref) when the strong count hits zero.  Warns and
 * bails if libobs has already been shut down. */
void obs_source_release(obs_source_t *source)
{
	if (!obs) {
		blog(LOG_WARNING, "Tried to release a source when the OBS "
				  "core is shut down!");
		return;
	}

	if (!source)
		return;

	/* grab the control block first: `source` is freed inside
	 * obs_source_destroy, but `control` outlives it */
	obs_weak_source_t *control = source->control;
	bool was_last = obs_ref_release(&control->ref);
	if (was_last) {
		obs_source_destroy(source);
		obs_weak_source_release(control);
	}
}
|
|
|
|
|
|
|
|
/* Adds a weak reference; NULL is tolerated as a no-op. */
void obs_weak_source_addref(obs_weak_source_t *weak)
{
	if (weak)
		obs_weak_ref_addref(&weak->ref);
}
|
|
|
|
|
|
|
|
/* Drops a weak reference, freeing the control block when it was the last. */
void obs_weak_source_release(obs_weak_source_t *weak)
{
	if (!weak)
		return;

	bool last = obs_weak_ref_release(&weak->ref);
	if (last)
		bfree(weak);
}
|
|
|
|
|
|
|
|
/* Attempts to take a new strong reference via the control block; returns
 * NULL if `source` is NULL or already being destroyed. */
obs_source_t *obs_source_get_ref(obs_source_t *source)
{
	return source ? obs_weak_source_get_source(source->control) : NULL;
}
|
|
|
|
|
|
|
|
/* Returns a new weak reference to the source's control block, or NULL. */
obs_weak_source_t *obs_source_get_weak_source(obs_source_t *source)
{
	if (!source)
		return NULL;

	obs_weak_source_t *control = source->control;
	obs_weak_source_addref(control);
	return control;
}
|
|
|
|
|
|
|
|
/* Upgrades a weak reference to a strong one; returns NULL when the weak ref
 * is NULL or the underlying source has already been released. */
obs_source_t *obs_weak_source_get_source(obs_weak_source_t *weak)
{
	if (weak && obs_weak_ref_get_ref(&weak->ref))
		return weak->source;

	return NULL;
}
|
|
|
|
|
|
|
|
bool obs_weak_source_references_source(obs_weak_source_t *weak,
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_source_t *source)
|
2015-05-03 11:45:41 -07:00
|
|
|
{
|
|
|
|
return weak && source && weak->source == source;
|
2013-11-20 17:36:46 -08:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Marks a source as removed and emits the "source_remove" signal exactly
 * once; repeated calls are no-ops. */
void obs_source_remove(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_remove"))
		return;
	if (source->removed)
		return;

	source->removed = true;
	obs_source_dosignal(source, "source_remove", "remove");
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
bool obs_source_removed(const obs_source_t *source)
|
2013-11-20 14:00:16 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_removed") ? source->removed
|
|
|
|
: true;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
static inline obs_data_t *get_defaults(const struct obs_source_info *info)
|
2014-04-04 00:30:37 -07:00
|
|
|
{
|
2014-09-25 17:44:05 -07:00
|
|
|
obs_data_t *settings = obs_data_create();
|
2017-08-29 09:29:56 -07:00
|
|
|
if (info->get_defaults2)
|
|
|
|
info->get_defaults2(info->type_data, settings);
|
|
|
|
else if (info->get_defaults)
|
2014-08-04 21:27:52 -07:00
|
|
|
info->get_defaults(settings);
|
2014-04-04 00:30:37 -07:00
|
|
|
return settings;
|
|
|
|
}
|
|
|
|
|
2015-12-29 15:25:45 -08:00
|
|
|
obs_data_t *obs_source_settings(const char *id)
|
2014-03-07 05:55:21 -08:00
|
|
|
{
|
2015-12-29 15:25:45 -08:00
|
|
|
const struct obs_source_info *info = get_source_info(id);
|
2014-04-04 00:30:37 -07:00
|
|
|
return (info) ? get_defaults(info) : NULL;
|
2014-03-07 05:55:21 -08:00
|
|
|
}
|
|
|
|
|
2015-12-29 15:25:45 -08:00
|
|
|
obs_data_t *obs_get_source_defaults(const char *id)
|
2015-03-23 09:04:04 -07:00
|
|
|
{
|
2015-12-29 15:25:45 -08:00
|
|
|
const struct obs_source_info *info = get_source_info(id);
|
2015-03-23 09:04:04 -07:00
|
|
|
return info ? get_defaults(info) : NULL;
|
|
|
|
}
|
|
|
|
|
2015-12-29 15:25:45 -08:00
|
|
|
obs_properties_t *obs_get_source_properties(const char *id)
|
2014-02-01 21:46:13 -08:00
|
|
|
{
|
2015-12-29 15:25:45 -08:00
|
|
|
const struct obs_source_info *info = get_source_info(id);
|
2017-08-29 09:45:36 -07:00
|
|
|
if (info && (info->get_properties || info->get_properties2)) {
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_data_t *defaults = get_defaults(info);
|
2017-08-29 09:45:36 -07:00
|
|
|
obs_properties_t *props;
|
|
|
|
|
|
|
|
if (info->get_properties2)
|
|
|
|
props = info->get_properties2(NULL, info->type_data);
|
|
|
|
else
|
|
|
|
props = info->get_properties(NULL);
|
2014-04-04 00:30:37 -07:00
|
|
|
|
2017-08-29 09:45:36 -07:00
|
|
|
obs_properties_apply_settings(props, defaults);
|
2014-04-04 00:30:37 -07:00
|
|
|
obs_data_release(defaults);
|
2017-08-29 09:45:36 -07:00
|
|
|
return props;
|
2014-04-04 00:30:37 -07:00
|
|
|
}
|
2014-02-01 21:46:13 -08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2016-01-10 17:07:01 -08:00
|
|
|
bool obs_is_source_configurable(const char *id)
|
|
|
|
{
|
|
|
|
const struct obs_source_info *info = get_source_info(id);
|
2017-08-29 09:45:36 -07:00
|
|
|
return info && (info->get_properties || info->get_properties2);
|
2016-01-10 17:07:01 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool obs_source_configurable(const obs_source_t *source)
|
|
|
|
{
|
|
|
|
return data_valid(source, "obs_source_configurable") &&
|
2019-06-22 22:13:45 -07:00
|
|
|
(source->info.get_properties || source->info.get_properties2);
|
2016-01-10 17:07:01 -08:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
obs_properties_t *obs_source_properties(const obs_source_t *source)
|
2014-03-23 01:07:54 -07:00
|
|
|
{
|
2015-10-16 18:49:45 -07:00
|
|
|
if (!data_valid(source, "obs_source_properties"))
|
|
|
|
return NULL;
|
|
|
|
|
2017-08-29 09:45:36 -07:00
|
|
|
if (source->info.get_properties2) {
|
|
|
|
obs_properties_t *props;
|
|
|
|
props = source->info.get_properties2(source->context.data,
|
2019-06-22 22:13:45 -07:00
|
|
|
source->info.type_data);
|
2017-08-29 09:45:36 -07:00
|
|
|
obs_properties_apply_settings(props, source->context.settings);
|
|
|
|
return props;
|
|
|
|
|
|
|
|
} else if (source->info.get_properties) {
|
2014-09-25 17:44:05 -07:00
|
|
|
obs_properties_t *props;
|
2014-09-29 08:36:13 -07:00
|
|
|
props = source->info.get_properties(source->context.data);
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
obs_properties_apply_settings(props, source->context.settings);
|
2014-04-04 00:30:37 -07:00
|
|
|
return props;
|
|
|
|
}
|
|
|
|
|
2014-03-23 01:07:54 -07:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
uint32_t obs_source_get_output_flags(const obs_source_t *source)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_output_flags")
|
|
|
|
? source->info.output_flags
|
|
|
|
: 0;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2015-12-29 15:25:45 -08:00
|
|
|
uint32_t obs_get_source_output_flags(const char *id)
|
2015-03-23 09:05:05 -07:00
|
|
|
{
|
2015-12-29 15:25:45 -08:00
|
|
|
const struct obs_source_info *info = get_source_info(id);
|
2015-03-23 09:05:05 -07:00
|
|
|
return info ? info->output_flags : 0;
|
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Performs a settings update that was deferred by obs_source_update() for
 * video sources; called from the video tick so the update callback runs on
 * the graphics thread rather than the caller's thread. */
static void obs_source_deferred_update(obs_source_t *source)
{
	if (source->context.data && source->info.update)
		source->info.update(source->context.data,
				    source->context.settings);

	/* clear the flag after the callback so the pending update is not
	 * lost if re-entered */
	source->defer_update = false;
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Applies new settings to a source.  'settings' may be NULL, in which case
 * the update callback is simply (re-)invoked with the current settings.
 *
 * For video sources the actual update callback is deferred to the video
 * tick (see obs_source_deferred_update) to avoid threading issues; other
 * sources are updated immediately. */
void obs_source_update(obs_source_t *source, obs_data_t *settings)
{
	if (!obs_source_valid(source, "obs_source_update"))
		return;

	if (settings)
		obs_data_apply(source->context.settings, settings);

	if (source->info.output_flags & OBS_SOURCE_VIDEO) {
		source->defer_update = true;
	} else if (source->context.data && source->info.update) {
		source->info.update(source->context.data,
				    source->context.settings);
	}
}
|
|
|
|
|
2014-09-30 06:40:46 -07:00
|
|
|
/* Signals that the source's properties have changed so any open properties
 * UI can refresh itself.  Emits only the source-local "update_properties"
 * signal (no global signal). */
void obs_source_update_properties(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_update_properties"))
		return;

	obs_source_dosignal(source, NULL, "update_properties");
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
void obs_source_send_mouse_click(obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_mouse_event *event,
|
|
|
|
int32_t type, bool mouse_up,
|
|
|
|
uint32_t click_count)
|
2014-09-14 13:31:57 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_source_valid(source, "obs_source_send_mouse_click"))
|
2014-09-14 13:31:57 -07:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (source->info.output_flags & OBS_SOURCE_INTERACTION) {
|
|
|
|
if (source->info.mouse_click) {
|
2019-06-22 22:13:45 -07:00
|
|
|
source->info.mouse_click(source->context.data, event,
|
|
|
|
type, mouse_up, click_count);
|
2014-09-14 13:31:57 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
void obs_source_send_mouse_move(obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_mouse_event *event,
|
|
|
|
bool mouse_leave)
|
2014-09-14 13:31:57 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_source_valid(source, "obs_source_send_mouse_move"))
|
2014-09-14 13:31:57 -07:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (source->info.output_flags & OBS_SOURCE_INTERACTION) {
|
|
|
|
if (source->info.mouse_move) {
|
2019-06-22 22:13:45 -07:00
|
|
|
source->info.mouse_move(source->context.data, event,
|
|
|
|
mouse_leave);
|
2014-09-14 13:31:57 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Forwards a mouse wheel event to an interactive source.  Ignored unless
 * the source type declares OBS_SOURCE_INTERACTION and implements
 * mouse_wheel. */
void obs_source_send_mouse_wheel(obs_source_t *source,
				 const struct obs_mouse_event *event,
				 int x_delta, int y_delta)
{
	if (!obs_source_valid(source, "obs_source_send_mouse_wheel"))
		return;

	if (!(source->info.output_flags & OBS_SOURCE_INTERACTION))
		return;

	if (source->info.mouse_wheel)
		source->info.mouse_wheel(source->context.data, event, x_delta,
					 y_delta);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Forwards a focus/unfocus notification to an interactive source.  Ignored
 * unless the source type declares OBS_SOURCE_INTERACTION and implements
 * the focus callback. */
void obs_source_send_focus(obs_source_t *source, bool focus)
{
	if (!obs_source_valid(source, "obs_source_send_focus"))
		return;

	if (!(source->info.output_flags & OBS_SOURCE_INTERACTION))
		return;

	if (source->info.focus)
		source->info.focus(source->context.data, focus);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Forwards a key press/release to an interactive source.  Ignored unless
 * the source type declares OBS_SOURCE_INTERACTION and implements
 * key_click. */
void obs_source_send_key_click(obs_source_t *source,
			       const struct obs_key_event *event, bool key_up)
{
	if (!obs_source_valid(source, "obs_source_send_key_click"))
		return;

	if (!(source->info.output_flags & OBS_SOURCE_INTERACTION))
		return;

	if (source->info.key_click)
		source->info.key_click(source->context.data, event, key_up);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Transitions a source to the active state: invokes its activate callback
 * (when implemented and its data is valid), then emits the global
 * "source_activate" and source-local "activate" signals unconditionally. */
static void activate_source(obs_source_t *source)
{
	bool can_notify = source->context.data && source->info.activate;

	if (can_notify)
		source->info.activate(source->context.data);

	obs_source_dosignal(source, "source_activate", "activate");
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Transitions a source out of the active state: invokes its deactivate
 * callback (when implemented and its data is valid), then emits the global
 * "source_deactivate" and source-local "deactivate" signals
 * unconditionally. */
static void deactivate_source(obs_source_t *source)
{
	bool can_notify = source->context.data && source->info.deactivate;

	if (can_notify)
		source->info.deactivate(source->context.data);

	obs_source_dosignal(source, "source_deactivate", "deactivate");
}
|
2014-02-20 21:04:14 -08:00
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Marks a source as shown: invokes its show callback (when implemented and
 * its data is valid), then emits the global "source_show" and source-local
 * "show" signals unconditionally. */
static void show_source(obs_source_t *source)
{
	bool can_notify = source->context.data && source->info.show;

	if (can_notify)
		source->info.show(source->context.data);

	obs_source_dosignal(source, "source_show", "show");
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Marks a source as hidden: invokes its hide callback (when implemented and
 * its data is valid), then emits the global "source_hide" and source-local
 * "hide" signals unconditionally. */
static void hide_source(obs_source_t *source)
{
	bool can_notify = source->context.data && source->info.hide;

	if (can_notify)
		source->info.hide(source->context.data);

	obs_source_dosignal(source, "source_hide", "hide");
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* obs_source_enum_active_tree callback: bumps the activation refcount of
 * each child in the tree.  The actual activate callback fires later when
 * the refcount transition is observed (elsewhere in this file). */
static void activate_tree(obs_source_t *parent, obs_source_t *child,
			  void *param)
{
	os_atomic_inc_long(&child->activate_refs);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* obs_source_enum_active_tree callback: drops the activation refcount of
 * each child in the tree (counterpart of activate_tree). */
static void deactivate_tree(obs_source_t *parent, obs_source_t *child,
			    void *param)
{
	os_atomic_dec_long(&child->activate_refs);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* obs_source_enum_active_tree callback: bumps the shown refcount of each
 * child in the tree. */
static void show_tree(obs_source_t *parent, obs_source_t *child, void *param)
{
	os_atomic_inc_long(&child->show_refs);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* obs_source_enum_active_tree callback: drops the shown refcount of each
 * child in the tree (counterpart of show_tree). */
static void hide_tree(obs_source_t *parent, obs_source_t *child, void *param)
{
	os_atomic_dec_long(&child->show_refs);

	UNUSED_PARAMETER(parent);
	UNUSED_PARAMETER(param);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Increments the shown refcount of the source and its whole active tree;
 * when activated on the main view, also increments the activation
 * refcounts.  Paired with obs_source_deactivate. */
void obs_source_activate(obs_source_t *source, enum view_type type)
{
	if (!obs_source_valid(source, "obs_source_activate"))
		return;

	/* any view showing the source counts toward show_refs */
	os_atomic_inc_long(&source->show_refs);
	obs_source_enum_active_tree(source, show_tree, NULL);

	/* only the main (output) view counts toward activate_refs */
	if (type == MAIN_VIEW) {
		os_atomic_inc_long(&source->activate_refs);
		obs_source_enum_active_tree(source, activate_tree, NULL);
	}
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Decrements the shown refcount of the source and its whole active tree;
 * when deactivated from the main view, also decrements the activation
 * refcounts.  The > 0 checks guard against refcount underflow from
 * unbalanced activate/deactivate calls. */
void obs_source_deactivate(obs_source_t *source, enum view_type type)
{
	if (!obs_source_valid(source, "obs_source_deactivate"))
		return;

	if (os_atomic_load_long(&source->show_refs) > 0) {
		os_atomic_dec_long(&source->show_refs);
		obs_source_enum_active_tree(source, hide_tree, NULL);
	}

	if (type == MAIN_VIEW) {
		if (os_atomic_load_long(&source->activate_refs) > 0) {
			os_atomic_dec_long(&source->activate_refs);
			obs_source_enum_active_tree(source, deactivate_tree,
						    NULL);
		}
	}
}
|
|
|
|
|
2015-03-26 22:49:48 -07:00
|
|
|
static inline struct obs_source_frame *get_closest_frame(obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
uint64_t sys_time);
|
2016-11-05 03:34:02 -07:00
|
|
|
bool set_async_texture_size(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame);
|
2016-11-05 03:34:02 -07:00
|
|
|
|
|
|
|
/* Per-video-frame tick for asynchronous-video sources: selects the frame
 * to display for the current system video time.  With deinterlacing
 * enabled, frame selection is delegated to the deinterlacer; otherwise the
 * previously held frame is returned to the source's frame pool and the
 * frame closest to 'sys_time' is picked.  All frame-list manipulation
 * happens under async_mutex.
 *
 * NOTE(review): cur_async_frame is read after the mutex is released when
 * deciding whether a texture resize is needed — presumably safe because
 * only the graphics thread mutates it here; confirm against the other
 * users of async_mutex in this file. */
static void async_tick(obs_source_t *source)
{
	uint64_t sys_time = obs->video.video_time;

	pthread_mutex_lock(&source->async_mutex);

	if (deinterlacing_enabled(source)) {
		deinterlace_process_last_frame(source, sys_time);
	} else {
		/* release the frame shown last tick before picking a new one */
		if (source->cur_async_frame) {
			remove_async_frame(source, source->cur_async_frame);
			source->cur_async_frame = NULL;
		}

		source->cur_async_frame = get_closest_frame(source, sys_time);
	}

	source->last_sys_timestamp = sys_time;
	pthread_mutex_unlock(&source->async_mutex);

	/* flag a texture (re)allocation if the new frame's size/format
	 * differs from the current async texture */
	if (source->cur_async_frame)
		source->async_update_texture =
			set_async_texture_size(source, source->cur_async_frame);
}
|
2015-03-26 22:49:48 -07:00
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Propagate a show/hide state change to a source and all of its filters,
 * then store the new state.  Filters are walked back-to-front, matching
 * the original notification order. */
static void source_update_showing_state(obs_source_t *source, bool now_showing)
{
	if (now_showing)
		show_source(source);
	else
		hide_source(source);

	for (size_t i = source->filters.num; i > 0; i--) {
		obs_source_t *filter = source->filters.array[i - 1];
		if (now_showing)
			show_source(filter);
		else
			hide_source(filter);
	}

	source->showing = now_showing;
}

/* Same as source_update_showing_state, but for activate/deactivate. */
static void source_update_active_state(obs_source_t *source, bool now_active)
{
	if (now_active)
		activate_source(source);
	else
		deactivate_source(source);

	for (size_t i = source->filters.num; i > 0; i--) {
		obs_source_t *filter = source->filters.array[i - 1];
		if (now_active)
			activate_source(filter);
		else
			deactivate_source(filter);
	}

	source->active = now_active;
}

/* Per-video-frame tick for a source: advances transitions, async frame
 * selection, deferred updates, show/active state notifications, and
 * finally the source's own video_tick callback.  'seconds' is the time
 * since the last tick. */
void obs_source_video_tick(obs_source_t *source, float seconds)
{
	bool now_showing, now_active;

	if (!obs_source_valid(source, "obs_source_video_tick"))
		return;

	if (source->info.type == OBS_SOURCE_TYPE_TRANSITION)
		obs_transition_tick(source, seconds);

	if ((source->info.output_flags & OBS_SOURCE_ASYNC) != 0)
		async_tick(source);

	if (source->defer_update)
		obs_source_deferred_update(source);

	/* reset the filter render texture information once every frame */
	if (source->filter_texrender)
		gs_texrender_reset(source->filter_texrender);

	/* call show/hide if the reference changed */
	now_showing = !!source->show_refs;
	if (now_showing != source->showing)
		source_update_showing_state(source, now_showing);

	/* call activate/deactivate if the reference changed */
	now_active = !!source->activate_refs;
	if (now_active != source->active)
		source_update_active_state(source, now_active);

	if (source->context.data && source->info.video_tick)
		source->info.video_tick(source->context.data, seconds);

	source->async_rendered = false;
	source->deinterlace_rendered = false;
}
|
|
|
|
|
2014-02-09 04:51:06 -08:00
|
|
|
/* Convert an audio frame count at the given sample rate to nanoseconds.
 * Unless the value is 3+ hours worth of frames, this won't overflow.
 * Returns 0 when sample_rate is 0 to avoid division by zero. */
static inline uint64_t conv_frames_to_time(const size_t sample_rate,
					   const size_t frames)
{
	if (sample_rate == 0)
		return 0;

	const uint64_t ns = (uint64_t)frames * 1000000000ULL;
	return ns / (uint64_t)sample_rate;
}
|
|
|
|
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
/* Convert a nanosecond duration to a whole number of audio frames at the
 * given sample rate (result truncates toward zero). */
static inline size_t conv_time_to_frames(const size_t sample_rate,
					 const uint64_t duration)
{
	const uint64_t frames =
		duration * (uint64_t)sample_rate / 1000000000ULL;
	return (size_t)frames;
}
|
|
|
|
|
|
|
|
/* maximum buffer size */
|
2019-06-22 22:13:45 -07:00
|
|
|
#define MAX_BUF_SIZE (1000 * AUDIO_OUTPUT_FRAMES * sizeof(float))
|
2014-01-10 11:45:53 -08:00
|
|
|
|
2017-09-28 06:14:36 -07:00
|
|
|
/* time threshold in nanoseconds to ensure audio timing is as seamless as
|
|
|
|
* possible */
|
|
|
|
#define TS_SMOOTHING_THRESHOLD 70000000ULL
|
|
|
|
|
2014-10-19 09:21:39 -07:00
|
|
|
/* Re-anchor a source's audio timing.  After this call, timing_adjust maps
 * source timestamps ('timestamp') onto OS time ('os_time').  Statement
 * order is preserved: timing_set is published before the adjustment is
 * recomputed, matching the original code. */
static inline void reset_audio_timing(obs_source_t *source, uint64_t timestamp,
				      uint64_t os_time)
{
	source->timing_set = true;
	source->timing_adjust = os_time - timestamp;
}
|
2013-10-24 00:57:55 -07:00
|
|
|
|
2015-12-17 04:28:35 -08:00
|
|
|
/* Drop all buffered audio for every channel of a source and restart its
 * audio timeline at 'os_time'. */
static void reset_audio_data(obs_source_t *source, uint64_t os_time)
{
	for (size_t ch = 0; ch < MAX_AUDIO_CHANNELS; ch++) {
		size_t buffered = source->audio_input_buf[ch].size;

		if (buffered)
			circlebuf_pop_front(&source->audio_input_buf[ch], NULL,
					    buffered);
	}

	source->last_audio_input_buf_size = 0;
	source->audio_ts = os_time;
	source->next_audio_sys_ts_min = os_time;
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
/* Log an unexpected audio timestamp discontinuity for a source, then
 * re-anchor its audio timing to the new timestamp under audio_buf_mutex. */
static void handle_ts_jump(obs_source_t *source, uint64_t expected, uint64_t ts,
			   uint64_t diff, uint64_t os_time)
{
	blog(LOG_DEBUG,
	     "Timestamp for source '%s' jumped by '%" PRIu64 "', "
	     "expected value %" PRIu64 ", input value %" PRIu64,
	     source->context.name, diff, expected, ts);

	/* the timing fields are shared with the audio path, so take the
	 * buffer mutex while re-anchoring */
	pthread_mutex_lock(&source->audio_buf_mutex);
	reset_audio_timing(source, ts, os_time);
	pthread_mutex_unlock(&source->audio_buf_mutex);
}
|
|
|
|
|
2014-12-13 05:10:45 -08:00
|
|
|
/* Invoke every registered raw-audio callback for this source with the
 * given audio data and mute flag.  The list is walked back-to-front,
 * matching the original iteration order, under audio_cb_mutex. */
static void source_signal_audio_data(obs_source_t *source,
				     const struct audio_data *in, bool muted)
{
	pthread_mutex_lock(&source->audio_cb_mutex);

	size_t idx = source->audio_cb_list.num;
	while (idx > 0) {
		struct audio_cb_info info;

		idx--;
		info = source->audio_cb_list.array[idx];
		info.callback(info.param, source, in, muted);
	}

	pthread_mutex_unlock(&source->audio_cb_mutex);
}
|
|
|
|
|
2014-10-19 09:21:39 -07:00
|
|
|
/* Absolute difference of two unsigned 64-bit timestamps. */
static inline uint64_t uint64_diff(uint64_t ts1, uint64_t ts2)
{
	if (ts1 < ts2)
		return ts2 - ts1;

	return ts1 - ts2;
}
|
|
|
|
|
2015-12-17 04:28:35 -08:00
|
|
|
/* Convert a nanosecond offset into a frame-count offset within the audio
 * input buffer, using the audio output's sample rate. */
static inline size_t get_buf_placement(audio_t *audio, uint64_t offset)
{
	const uint64_t rate = audio_output_get_sample_rate(audio);
	return (size_t)(offset * rate / 1000000000ULL);
}
|
|
|
|
|
|
|
|
/* Write incoming audio into each channel's circular buffer at the byte
 * position corresponding to its timestamp (relative to the source's
 * audio_ts base), truncating anything previously buffered past that
 * position.  Data that would push the buffers past MAX_BUF_SIZE is
 * dropped. */
static void source_output_audio_place(obs_source_t *source,
				      const struct audio_data *in)
{
	audio_t *audio = obs->audio.audio;
	size_t channels = audio_output_get_channels(audio);
	size_t bytes = in->frames * sizeof(float);
	size_t placement;

	/* restart the audio timeline if it was never set or moved backward */
	if (!source->audio_ts || in->timestamp < source->audio_ts)
		reset_audio_data(source, in->timestamp);

	placement = get_buf_placement(audio,
				      in->timestamp - source->audio_ts) *
		    sizeof(float);

#if DEBUG_AUDIO == 1
	blog(LOG_DEBUG,
	     "frames: %lu, size: %lu, placement: %lu, base_ts: %llu, ts: %llu",
	     (unsigned long)in->frames,
	     (unsigned long)source->audio_input_buf[0].size,
	     (unsigned long)placement, source->audio_ts, in->timestamp);
#endif

	/* do not allow the circular buffers to become too big */
	if ((placement + bytes) > MAX_BUF_SIZE)
		return;

	for (size_t ch = 0; ch < channels; ch++) {
		circlebuf_place(&source->audio_input_buf[ch], placement,
				in->data[ch], bytes);
		circlebuf_pop_back(&source->audio_input_buf[ch], NULL,
				   source->audio_input_buf[ch].size -
					   (placement + bytes));
	}

	source->last_audio_input_buf_size = 0;
}
|
|
|
|
|
|
|
|
/* Append incoming audio to the tail of each channel's circular buffer.
 * The data is dropped entirely if appending would grow the buffers past
 * MAX_BUF_SIZE. */
static inline void source_output_audio_push_back(obs_source_t *source,
						 const struct audio_data *in)
{
	audio_t *audio = obs->audio.audio;
	size_t channels = audio_output_get_channels(audio);
	size_t bytes = in->frames * sizeof(float);

	/* do not allow the circular buffers to become too big */
	if ((source->audio_input_buf[0].size + bytes) > MAX_BUF_SIZE)
		return;

	for (size_t ch = 0; ch < channels; ch++)
		circlebuf_push_back(&source->audio_input_buf[ch],
				    in->data[ch], bytes);

	/* reset audio input buffer size to ensure that audio doesn't get
	 * perpetually cut */
	source->last_audio_input_buf_size = 0;
}
|
|
|
|
|
2015-12-20 01:16:43 -08:00
|
|
|
static inline bool source_muted(obs_source_t *source, uint64_t os_time)
|
|
|
|
{
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
if (source->push_to_mute_enabled && source->user_push_to_mute_pressed)
|
2019-06-22 22:13:45 -07:00
|
|
|
source->push_to_mute_stop_time =
|
|
|
|
os_time + source->push_to_mute_delay * 1000000;
|
2015-12-20 01:16:43 -08:00
|
|
|
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
if (source->push_to_talk_enabled && source->user_push_to_talk_pressed)
|
2019-06-22 22:13:45 -07:00
|
|
|
source->push_to_talk_stop_time =
|
|
|
|
os_time + source->push_to_talk_delay * 1000000;
|
2015-12-20 01:16:43 -08:00
|
|
|
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
bool push_to_mute_active = source->user_push_to_mute_pressed ||
|
2019-06-22 22:13:45 -07:00
|
|
|
os_time < source->push_to_mute_stop_time;
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
bool push_to_talk_active = source->user_push_to_talk_pressed ||
|
2019-06-22 22:13:45 -07:00
|
|
|
os_time < source->push_to_talk_stop_time;
|
2015-12-20 01:16:43 -08:00
|
|
|
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
return !source->enabled || source->user_muted ||
|
2019-06-22 22:13:45 -07:00
|
|
|
(source->push_to_mute_enabled && push_to_mute_active) ||
|
|
|
|
(source->push_to_talk_enabled && !push_to_talk_active);
|
2015-12-20 01:16:43 -08:00
|
|
|
}
|
|
|
|
|
2015-12-17 04:28:35 -08:00
|
|
|
/* Receives one packet of raw audio from an async source, smooths/corrects
 * its timestamp, and places it into the source's circular audio buffer
 * (under audio_buf_mutex).  Also emits the "audio_data" signal with the
 * current mute state. */
static void source_output_audio_data(obs_source_t *source,
				     const struct audio_data *data)
{
	size_t sample_rate = audio_output_get_sample_rate(obs->audio.audio);
	struct audio_data in = *data;
	uint64_t diff;
	uint64_t os_time = os_gettime_ns();
	int64_t sync_offset;
	bool using_direct_ts = false;
	bool push_back = false;

	/* detects 'directly' set timestamps as long as they're within
	 * a certain threshold */
	if (uint64_diff(in.timestamp, os_time) < MAX_TS_VAR) {
		source->timing_adjust = 0;
		source->timing_set = true;
		using_direct_ts = true;
	}

	if (!source->timing_set) {
		/* first packet: establish the timing baseline */
		reset_audio_timing(source, in.timestamp, os_time);

	} else if (source->next_audio_ts_min != 0) {
		diff = uint64_diff(source->next_audio_ts_min, in.timestamp);

		/* smooth audio if within threshold */
		if (diff > MAX_TS_VAR && !using_direct_ts)
			handle_ts_jump(source, source->next_audio_ts_min,
				       in.timestamp, diff, os_time);
		else if (diff < TS_SMOOTHING_THRESHOLD) {
			/* unbuffered + decoupled sources re-derive the
			 * timing adjustment on every packet */
			if (source->async_unbuffered && source->async_decoupled)
				source->timing_adjust = os_time - in.timestamp;
			in.timestamp = source->next_audio_ts_min;
		}
	}

	source->last_audio_ts = in.timestamp;
	/* expected timestamp of the next packet, given this packet's
	 * duration in frames */
	source->next_audio_ts_min =
		in.timestamp + conv_frames_to_time(sample_rate, in.frames);

	in.timestamp += source->timing_adjust;

	pthread_mutex_lock(&source->audio_buf_mutex);

	if (source->next_audio_sys_ts_min == in.timestamp) {
		/* perfectly contiguous with the buffered audio: append */
		push_back = true;

	} else if (source->next_audio_sys_ts_min) {
		diff = uint64_diff(source->next_audio_sys_ts_min, in.timestamp);

		if (diff < TS_SMOOTHING_THRESHOLD) {
			push_back = true;

		/* This typically only happens if used with async video when
		 * audio/video start transitioning in to a timestamp jump.
		 * Audio will typically have a timestamp jump, and then video
		 * will have a timestamp jump.  If that case is encountered,
		 * just clear the audio data in that small window and force a
		 * resync.  This handles all cases rather than just looping. */
		} else if (diff > MAX_TS_VAR) {
			reset_audio_timing(source, data->timestamp, os_time);
			in.timestamp = data->timestamp + source->timing_adjust;
		}
	}

	sync_offset = source->sync_offset;
	in.timestamp += sync_offset;
	in.timestamp -= source->resample_offset;

	source->next_audio_sys_ts_min =
		source->next_audio_ts_min + source->timing_adjust;

	/* if the user-set sync offset changed, drop the push-back
	 * optimization once so the new offset is re-placed, then latch the
	 * new offset */
	if (source->last_sync_offset != sync_offset) {
		if (source->last_sync_offset)
			push_back = false;
		source->last_sync_offset = sync_offset;
	}

	/* monitor-only sources never feed the mix buffer */
	if (source->monitoring_type != OBS_MONITORING_TYPE_MONITOR_ONLY) {
		if (push_back && source->audio_ts)
			source_output_audio_push_back(source, &in);
		else
			source_output_audio_place(source, &in);
	}

	pthread_mutex_unlock(&source->audio_buf_mutex);

	source_signal_audio_data(source, data, source_muted(source, os_time));
}
|
|
|
|
|
2013-10-26 14:32:06 -07:00
|
|
|
/* Which GPU conversion path (if any) a raw async video format requires
 * before it can be rendered as RGB(A).  Order matters: values are implicit
 * and referenced by switches below. */
enum convert_type {
	CONVERT_NONE,        /* directly renderable, no conversion pass */
	CONVERT_NV12,        /* two-plane 4:2:0 (Y plane + interleaved UV) */
	CONVERT_420,         /* planar 4:2:0 */
	CONVERT_420_A,       /* planar 4:2:0 plus alpha plane */
	CONVERT_422,         /* planar 4:2:2 */
	CONVERT_422_A,       /* planar 4:2:2 plus alpha plane */
	CONVERT_422_PACK,    /* packed 4:2:2 (YUY2 / UYVY / YVYU) */
	CONVERT_444,         /* planar 4:4:4 */
	CONVERT_444_A,       /* planar 4:4:4 plus alpha plane */
	CONVERT_444_A_PACK,  /* packed 4:4:4 with alpha (AYUV) */
	CONVERT_800,         /* 8-bit grayscale */
	CONVERT_RGB_LIMITED, /* RGB needing limited->full range expansion */
	CONVERT_BGR3,        /* packed 24-bit BGR */
};
|
|
|
|
|
2019-04-22 23:38:26 -07:00
|
|
|
static inline enum convert_type get_convert_type(enum video_format format,
|
2019-06-22 22:13:45 -07:00
|
|
|
bool full_range)
|
2013-10-26 14:32:06 -07:00
|
|
|
{
|
2013-10-31 10:28:47 -07:00
|
|
|
switch (format) {
|
2013-10-26 14:32:06 -07:00
|
|
|
case VIDEO_FORMAT_I420:
|
|
|
|
return CONVERT_420;
|
|
|
|
case VIDEO_FORMAT_NV12:
|
|
|
|
return CONVERT_NV12;
|
2019-04-02 09:03:57 -07:00
|
|
|
case VIDEO_FORMAT_I444:
|
|
|
|
return CONVERT_444;
|
2019-06-17 22:25:18 -07:00
|
|
|
case VIDEO_FORMAT_I422:
|
|
|
|
return CONVERT_422;
|
2013-10-26 14:32:06 -07:00
|
|
|
|
|
|
|
case VIDEO_FORMAT_YVYU:
|
|
|
|
case VIDEO_FORMAT_YUY2:
|
|
|
|
case VIDEO_FORMAT_UYVY:
|
2019-08-11 11:26:22 -07:00
|
|
|
return CONVERT_422_PACK;
|
2013-10-26 14:32:06 -07:00
|
|
|
|
2016-02-24 20:02:11 -08:00
|
|
|
case VIDEO_FORMAT_Y800:
|
2019-04-22 23:38:26 -07:00
|
|
|
return CONVERT_800;
|
|
|
|
|
2014-01-19 02:16:41 -08:00
|
|
|
case VIDEO_FORMAT_NONE:
|
2013-10-26 14:32:06 -07:00
|
|
|
case VIDEO_FORMAT_RGBA:
|
|
|
|
case VIDEO_FORMAT_BGRA:
|
|
|
|
case VIDEO_FORMAT_BGRX:
|
2019-04-22 23:38:26 -07:00
|
|
|
return full_range ? CONVERT_NONE : CONVERT_RGB_LIMITED;
|
2019-05-30 06:05:53 -07:00
|
|
|
|
|
|
|
case VIDEO_FORMAT_BGR3:
|
|
|
|
return CONVERT_BGR3;
|
2019-08-11 11:26:22 -07:00
|
|
|
|
|
|
|
case VIDEO_FORMAT_I40A:
|
|
|
|
return CONVERT_420_A;
|
|
|
|
|
|
|
|
case VIDEO_FORMAT_I42A:
|
|
|
|
return CONVERT_422_A;
|
|
|
|
|
|
|
|
case VIDEO_FORMAT_YUVA:
|
|
|
|
return CONVERT_444_A;
|
|
|
|
|
|
|
|
case VIDEO_FORMAT_AYUV:
|
|
|
|
return CONVERT_444_A_PACK;
|
2013-10-26 14:32:06 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
return CONVERT_NONE;
|
|
|
|
}
|
|
|
|
|
2014-04-23 20:57:56 -07:00
|
|
|
static inline bool set_packed422_sizes(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame)
|
2014-04-23 20:57:56 -07:00
|
|
|
{
|
2019-08-09 20:43:14 -07:00
|
|
|
source->async_convert_width[0] = frame->width / 2;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_texture_formats[0] = GS_BGRA;
|
|
|
|
source->async_channel_count = 1;
|
2014-05-30 02:23:36 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-08-11 11:26:22 -07:00
|
|
|
static inline bool
|
|
|
|
set_packed444_alpha_sizes(struct obs_source *source,
|
|
|
|
const struct obs_source_frame *frame)
|
|
|
|
{
|
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_texture_formats[0] = GS_BGRA;
|
|
|
|
source->async_channel_count = 1;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-04-02 09:03:57 -07:00
|
|
|
static inline bool set_planar444_sizes(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame)
|
2019-04-02 09:03:57 -07:00
|
|
|
{
|
2019-08-09 20:43:14 -07:00
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_width[1] = frame->width;
|
|
|
|
source->async_convert_width[2] = frame->width;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_convert_height[1] = frame->height;
|
|
|
|
source->async_convert_height[2] = frame->height;
|
|
|
|
source->async_texture_formats[0] = GS_R8;
|
|
|
|
source->async_texture_formats[1] = GS_R8;
|
|
|
|
source->async_texture_formats[2] = GS_R8;
|
|
|
|
source->async_channel_count = 3;
|
2019-04-02 09:03:57 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-08-11 11:26:22 -07:00
|
|
|
static inline bool
|
|
|
|
set_planar444_alpha_sizes(struct obs_source *source,
|
|
|
|
const struct obs_source_frame *frame)
|
|
|
|
{
|
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_width[1] = frame->width;
|
|
|
|
source->async_convert_width[2] = frame->width;
|
|
|
|
source->async_convert_width[3] = frame->width;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_convert_height[1] = frame->height;
|
|
|
|
source->async_convert_height[2] = frame->height;
|
|
|
|
source->async_convert_height[3] = frame->height;
|
|
|
|
source->async_texture_formats[0] = GS_R8;
|
|
|
|
source->async_texture_formats[1] = GS_R8;
|
|
|
|
source->async_texture_formats[2] = GS_R8;
|
|
|
|
source->async_texture_formats[3] = GS_R8;
|
|
|
|
source->async_channel_count = 4;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-05-30 02:23:36 -07:00
|
|
|
static inline bool set_planar420_sizes(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame)
|
2014-05-30 02:23:36 -07:00
|
|
|
{
|
2019-08-09 20:43:14 -07:00
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_width[1] = frame->width / 2;
|
|
|
|
source->async_convert_width[2] = frame->width / 2;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_convert_height[1] = frame->height / 2;
|
|
|
|
source->async_convert_height[2] = frame->height / 2;
|
|
|
|
source->async_texture_formats[0] = GS_R8;
|
|
|
|
source->async_texture_formats[1] = GS_R8;
|
|
|
|
source->async_texture_formats[2] = GS_R8;
|
|
|
|
source->async_channel_count = 3;
|
2014-04-23 20:57:56 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-08-11 11:26:22 -07:00
|
|
|
static inline bool
|
|
|
|
set_planar420_alpha_sizes(struct obs_source *source,
|
|
|
|
const struct obs_source_frame *frame)
|
|
|
|
{
|
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_width[1] = frame->width / 2;
|
|
|
|
source->async_convert_width[2] = frame->width / 2;
|
|
|
|
source->async_convert_width[3] = frame->width;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_convert_height[1] = frame->height / 2;
|
|
|
|
source->async_convert_height[2] = frame->height / 2;
|
|
|
|
source->async_convert_height[3] = frame->height;
|
|
|
|
source->async_texture_formats[0] = GS_R8;
|
|
|
|
source->async_texture_formats[1] = GS_R8;
|
|
|
|
source->async_texture_formats[2] = GS_R8;
|
|
|
|
source->async_texture_formats[3] = GS_R8;
|
|
|
|
source->async_channel_count = 4;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-17 22:25:18 -07:00
|
|
|
static inline bool set_planar422_sizes(struct obs_source *source,
|
|
|
|
const struct obs_source_frame *frame)
|
|
|
|
{
|
2019-08-09 20:43:14 -07:00
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_width[1] = frame->width / 2;
|
|
|
|
source->async_convert_width[2] = frame->width / 2;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_convert_height[1] = frame->height;
|
|
|
|
source->async_convert_height[2] = frame->height;
|
|
|
|
source->async_texture_formats[0] = GS_R8;
|
|
|
|
source->async_texture_formats[1] = GS_R8;
|
|
|
|
source->async_texture_formats[2] = GS_R8;
|
|
|
|
source->async_channel_count = 3;
|
2019-06-17 22:25:18 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-08-11 11:26:22 -07:00
|
|
|
static inline bool
|
|
|
|
set_planar422_alpha_sizes(struct obs_source *source,
|
|
|
|
const struct obs_source_frame *frame)
|
|
|
|
{
|
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_width[1] = frame->width / 2;
|
|
|
|
source->async_convert_width[2] = frame->width / 2;
|
|
|
|
source->async_convert_width[3] = frame->width;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_convert_height[1] = frame->height;
|
|
|
|
source->async_convert_height[2] = frame->height;
|
|
|
|
source->async_convert_height[3] = frame->height;
|
|
|
|
source->async_texture_formats[0] = GS_R8;
|
|
|
|
source->async_texture_formats[1] = GS_R8;
|
|
|
|
source->async_texture_formats[2] = GS_R8;
|
|
|
|
source->async_texture_formats[3] = GS_R8;
|
|
|
|
source->async_channel_count = 4;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-12-18 11:37:46 -08:00
|
|
|
static inline bool set_nv12_sizes(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame)
|
2014-12-18 11:37:46 -08:00
|
|
|
{
|
2019-08-09 20:43:14 -07:00
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_width[1] = frame->width / 2;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_convert_height[1] = frame->height / 2;
|
|
|
|
source->async_texture_formats[0] = GS_R8;
|
|
|
|
source->async_texture_formats[1] = GS_R8G8;
|
|
|
|
source->async_channel_count = 2;
|
2014-12-18 11:37:46 -08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-04-22 23:38:26 -07:00
|
|
|
static inline bool set_y800_sizes(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame)
|
2019-04-22 23:38:26 -07:00
|
|
|
{
|
2019-08-09 20:43:14 -07:00
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_texture_formats[0] = GS_R8;
|
|
|
|
source->async_channel_count = 1;
|
2019-04-22 23:38:26 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool set_rgb_limited_sizes(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame)
|
2019-04-22 23:38:26 -07:00
|
|
|
{
|
2019-08-09 20:43:14 -07:00
|
|
|
source->async_convert_width[0] = frame->width;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_texture_formats[0] = convert_video_format(frame->format);
|
|
|
|
source->async_channel_count = 1;
|
2019-04-22 23:38:26 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-05-30 06:05:53 -07:00
|
|
|
static inline bool set_bgr3_sizes(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame)
|
2019-05-30 06:05:53 -07:00
|
|
|
{
|
2019-08-09 20:43:14 -07:00
|
|
|
source->async_convert_width[0] = frame->width * 3;
|
|
|
|
source->async_convert_height[0] = frame->height;
|
|
|
|
source->async_texture_formats[0] = GS_R8;
|
|
|
|
source->async_channel_count = 1;
|
2019-05-30 06:05:53 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-04-23 20:57:56 -07:00
|
|
|
static inline bool init_gpu_conversion(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame)
|
2014-04-23 20:57:56 -07:00
|
|
|
{
|
2019-04-22 23:38:26 -07:00
|
|
|
switch (get_convert_type(frame->format, frame->full_range)) {
|
2019-08-11 11:26:22 -07:00
|
|
|
case CONVERT_422_PACK:
|
2019-06-22 22:13:45 -07:00
|
|
|
return set_packed422_sizes(source, frame);
|
2014-05-30 02:23:36 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case CONVERT_420:
|
|
|
|
return set_planar420_sizes(source, frame);
|
2019-04-02 09:03:57 -07:00
|
|
|
|
2019-06-17 22:25:18 -07:00
|
|
|
case CONVERT_422:
|
|
|
|
return set_planar422_sizes(source, frame);
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case CONVERT_NV12:
|
|
|
|
return set_nv12_sizes(source, frame);
|
2014-04-23 20:57:56 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case CONVERT_444:
|
|
|
|
return set_planar444_sizes(source, frame);
|
2019-04-22 23:38:26 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case CONVERT_800:
|
|
|
|
return set_y800_sizes(source, frame);
|
2019-04-22 23:38:26 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case CONVERT_RGB_LIMITED:
|
|
|
|
return set_rgb_limited_sizes(source, frame);
|
2019-05-30 06:05:53 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case CONVERT_BGR3:
|
|
|
|
return set_bgr3_sizes(source, frame);
|
2014-04-23 20:57:56 -07:00
|
|
|
|
2019-08-11 11:26:22 -07:00
|
|
|
case CONVERT_420_A:
|
|
|
|
return set_planar420_alpha_sizes(source, frame);
|
|
|
|
|
|
|
|
case CONVERT_422_A:
|
|
|
|
return set_planar422_alpha_sizes(source, frame);
|
|
|
|
|
|
|
|
case CONVERT_444_A:
|
|
|
|
return set_planar444_alpha_sizes(source, frame);
|
|
|
|
|
|
|
|
case CONVERT_444_A_PACK:
|
|
|
|
return set_packed444_alpha_sizes(source, frame);
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case CONVERT_NONE:
|
|
|
|
assert(false && "No conversion requested");
|
|
|
|
break;
|
2014-04-23 20:57:56 -07:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-03-15 20:26:50 -07:00
|
|
|
/* (Re)allocates the async-video textures/texrenders for a source when the
 * incoming frame's width/height/format/range differs from what was last
 * allocated.  Returns true if the primary texture exists afterwards (or if
 * nothing needed to change). */
bool set_async_texture_size(struct obs_source *source,
			    const struct obs_source_frame *frame)
{
	enum convert_type cur =
		get_convert_type(frame->format, frame->full_range);

	/* fast path: frame parameters unchanged, keep existing textures */
	if (source->async_width == frame->width &&
	    source->async_height == frame->height &&
	    source->async_format == frame->format &&
	    source->async_full_range == frame->full_range)
		return true;

	source->async_width = frame->width;
	source->async_height = frame->height;
	source->async_format = frame->format;
	source->async_full_range = frame->full_range;

	gs_enter_context(obs->video.graphics);

	/* drop every previously allocated plane texture (current and
	 * previous-frame sets) */
	for (size_t c = 0; c < MAX_AV_PLANES; c++) {
		gs_texture_destroy(source->async_textures[c]);
		source->async_textures[c] = NULL;
		gs_texture_destroy(source->async_prev_textures[c]);
		source->async_prev_textures[c] = NULL;
	}

	gs_texrender_destroy(source->async_texrender);
	gs_texrender_destroy(source->async_prev_texrender);
	source->async_texrender = NULL;
	source->async_prev_texrender = NULL;

	const enum gs_color_format format = convert_video_format(frame->format);
	/* note: init_gpu_conversion() is only evaluated when a conversion
	 * is actually required (short-circuit) */
	const bool async_gpu_conversion = (cur != CONVERT_NONE) &&
					  init_gpu_conversion(source, frame);
	source->async_gpu_conversion = async_gpu_conversion;
	if (async_gpu_conversion) {
		source->async_texrender =
			gs_texrender_create(format, GS_ZS_NONE);

		/* one dynamic texture per plane, sized by the conversion
		 * setup helpers */
		for (int c = 0; c < source->async_channel_count; ++c)
			source->async_textures[c] = gs_texture_create(
				source->async_convert_width[c],
				source->async_convert_height[c],
				source->async_texture_formats[c], 1, NULL,
				GS_DYNAMIC);
	} else {
		/* directly renderable format: single texture */
		source->async_textures[0] =
			gs_texture_create(frame->width, frame->height, format,
					  1, NULL, GS_DYNAMIC);
	}

	if (deinterlacing_enabled(source))
		set_deinterlace_texture_size(source);

	gs_leave_context();

	return source->async_textures[0] != NULL;
}
|
|
|
|
|
2019-08-09 20:43:14 -07:00
|
|
|
/* Copies each raw frame plane into its corresponding dynamic texture.
 * Only valid for formats that require a conversion pass. */
static void upload_raw_frame(gs_texture_t *tex[MAX_AV_PLANES],
			     const struct obs_source_frame *frame)
{
	const enum convert_type type =
		get_convert_type(frame->format, frame->full_range);

	if (type == CONVERT_NONE) {
		assert(false && "No conversion requested");
		return;
	}

	for (size_t c = 0; c < MAX_AV_PLANES; c++) {
		if (tex[c])
			gs_texture_set_image(tex[c], frame->data[c],
					     frame->linesize[c], false);
	}
}
|
|
|
|
|
2019-04-22 23:38:26 -07:00
|
|
|
static const char *select_conversion_technique(enum video_format format,
|
2019-06-22 22:13:45 -07:00
|
|
|
bool full_range)
|
2014-04-23 20:57:56 -07:00
|
|
|
{
|
|
|
|
switch (format) {
|
2019-06-22 22:13:45 -07:00
|
|
|
case VIDEO_FORMAT_UYVY:
|
|
|
|
return "UYVY_Reverse";
|
2014-04-23 20:57:56 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case VIDEO_FORMAT_YUY2:
|
|
|
|
return "YUY2_Reverse";
|
2014-04-23 20:57:56 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case VIDEO_FORMAT_YVYU:
|
|
|
|
return "YVYU_Reverse";
|
2014-04-23 20:57:56 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case VIDEO_FORMAT_I420:
|
|
|
|
return "I420_Reverse";
|
2014-05-30 02:23:36 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case VIDEO_FORMAT_NV12:
|
|
|
|
return "NV12_Reverse";
|
2019-04-02 09:03:57 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case VIDEO_FORMAT_I444:
|
|
|
|
return "I444_Reverse";
|
2014-04-23 20:57:56 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case VIDEO_FORMAT_Y800:
|
|
|
|
return full_range ? "Y800_Full" : "Y800_Limited";
|
2019-04-22 23:38:26 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case VIDEO_FORMAT_BGR3:
|
|
|
|
return full_range ? "BGR3_Full" : "BGR3_Limited";
|
2019-05-30 06:05:53 -07:00
|
|
|
|
2019-06-17 22:25:18 -07:00
|
|
|
case VIDEO_FORMAT_I422:
|
|
|
|
return "I422_Reverse";
|
|
|
|
|
2019-08-11 11:26:22 -07:00
|
|
|
case VIDEO_FORMAT_I40A:
|
|
|
|
return "I40A_Reverse";
|
|
|
|
|
|
|
|
case VIDEO_FORMAT_I42A:
|
|
|
|
return "I42A_Reverse";
|
|
|
|
|
|
|
|
case VIDEO_FORMAT_YUVA:
|
|
|
|
return "YUVA_Reverse";
|
|
|
|
|
|
|
|
case VIDEO_FORMAT_AYUV:
|
|
|
|
return "AYUV_Reverse";
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
case VIDEO_FORMAT_BGRA:
|
|
|
|
case VIDEO_FORMAT_BGRX:
|
|
|
|
case VIDEO_FORMAT_RGBA:
|
|
|
|
case VIDEO_FORMAT_NONE:
|
|
|
|
if (full_range)
|
|
|
|
assert(false && "No conversion requested");
|
|
|
|
else
|
|
|
|
return "RGB_Limited";
|
|
|
|
break;
|
2014-04-23 20:57:56 -07:00
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Convenience helper: sets the float effect parameter `name` on `effect`. */
static inline void set_eparam(gs_effect_t *effect, const char *name, float val)
{
	gs_effect_set_float(gs_effect_get_param_by_name(effect, name), val);
}
|
|
|
|
|
2017-05-06 01:22:51 -07:00
|
|
|
/* Convenience helper: sets the integer effect parameter `name` on `effect`. */
static inline void set_eparami(gs_effect_t *effect, const char *name, int val)
{
	gs_effect_set_int(gs_effect_get_param_by_name(effect, name), val);
}
|
|
|
|
|
2014-04-23 20:57:56 -07:00
|
|
|
/* Performs GPU-side format conversion of an async video frame: uploads the
 * raw planes into `tex`, then renders a single conversion pass into
 * `texrender` using the technique selected for the frame's format/range.
 *
 * Returns true if the texrender pass could be started (and hence the
 * converted texture is valid), false otherwise.  On failure the function
 * still balances the debug-marker begin/end. */
static bool update_async_texrender(struct obs_source *source,
				   const struct obs_source_frame *frame,
				   gs_texture_t *tex[MAX_AV_PLANES],
				   gs_texrender_t *texrender)
{
	GS_DEBUG_MARKER_BEGIN(GS_DEBUG_COLOR_CONVERT_FORMAT, "Convert Format");

	gs_texrender_reset(texrender);

	/* copy the frame's raw plane data into the staging textures */
	upload_raw_frame(tex, frame);

	uint32_t cx = source->async_width;
	uint32_t cy = source->async_height;

	gs_effect_t *conv = obs->video.conversion_effect;
	const char *tech_name =
		select_conversion_technique(frame->format, frame->full_range);
	gs_technique_t *tech = gs_effect_get_technique(conv, tech_name);

	const bool success = gs_texrender_begin(texrender, cx, cy);

	if (success) {
		/* conversion is a plain copy/convert; blending would corrupt
		 * the output */
		gs_enable_blending(false);

		gs_technique_begin(tech);
		gs_technique_begin_pass(tech, 0);

		/* bind whichever planes this format actually uses */
		if (tex[0])
			gs_effect_set_texture(
				gs_effect_get_param_by_name(conv, "image"),
				tex[0]);
		if (tex[1])
			gs_effect_set_texture(
				gs_effect_get_param_by_name(conv, "image1"),
				tex[1]);
		if (tex[2])
			gs_effect_set_texture(
				gs_effect_get_param_by_name(conv, "image2"),
				tex[2]);
		if (tex[3])
			gs_effect_set_texture(
				gs_effect_get_param_by_name(conv, "image3"),
				tex[3]);
		/* dimension uniforms used by the conversion shaders */
		set_eparam(conv, "width", (float)cx);
		set_eparam(conv, "height", (float)cy);
		set_eparam(conv, "width_d2", (float)cx * 0.5f);
		set_eparam(conv, "height_d2", (float)cy * 0.5f);
		set_eparam(conv, "width_x2_i", 0.5f / (float)cx);

		/* first three rows of the frame's 4x4 YUV->RGB color matrix */
		struct vec4 vec0, vec1, vec2;
		vec4_set(&vec0, frame->color_matrix[0], frame->color_matrix[1],
			 frame->color_matrix[2], frame->color_matrix[3]);
		vec4_set(&vec1, frame->color_matrix[4], frame->color_matrix[5],
			 frame->color_matrix[6], frame->color_matrix[7]);
		vec4_set(&vec2, frame->color_matrix[8], frame->color_matrix[9],
			 frame->color_matrix[10], frame->color_matrix[11]);
		gs_effect_set_vec4(
			gs_effect_get_param_by_name(conv, "color_vec0"), &vec0);
		gs_effect_set_vec4(
			gs_effect_get_param_by_name(conv, "color_vec1"), &vec1);
		gs_effect_set_vec4(
			gs_effect_get_param_by_name(conv, "color_vec2"), &vec2);
		/* limited-range sources additionally need min/max range
		 * values (three floats each) for range expansion */
		if (!frame->full_range) {
			gs_eparam_t *min_param = gs_effect_get_param_by_name(
				conv, "color_range_min");
			gs_effect_set_val(min_param, frame->color_range_min,
					  sizeof(float) * 3);
			gs_eparam_t *max_param = gs_effect_get_param_by_name(
				conv, "color_range_max");
			gs_effect_set_val(max_param, frame->color_range_max,
					  sizeof(float) * 3);
		}

		/* single full-screen triangle */
		gs_draw(GS_TRIS, 0, 3);

		gs_technique_end_pass(tech);
		gs_technique_end(tech);

		gs_enable_blending(true);

		gs_texrender_end(texrender);
	}

	GS_DEBUG_MARKER_END();
	return success;
}
|
|
|
|
|
2016-03-15 20:33:20 -07:00
|
|
|
bool update_async_texture(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame,
|
|
|
|
gs_texture_t *tex, gs_texrender_t *texrender)
|
2019-08-09 20:43:14 -07:00
|
|
|
{
|
|
|
|
gs_texture_t *tex3[MAX_AV_PLANES] = {tex, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL};
|
|
|
|
return update_async_textures(source, frame, tex3, texrender);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool update_async_textures(struct obs_source *source,
|
|
|
|
const struct obs_source_frame *frame,
|
|
|
|
gs_texture_t *tex[MAX_AV_PLANES],
|
|
|
|
gs_texrender_t *texrender)
|
2013-10-26 14:32:06 -07:00
|
|
|
{
|
2019-04-22 23:38:26 -07:00
|
|
|
enum convert_type type;
|
2014-04-13 02:22:28 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
source->async_flip = frame->flip;
|
2013-10-26 14:32:06 -07:00
|
|
|
|
2014-04-23 20:57:56 -07:00
|
|
|
if (source->async_gpu_conversion && texrender)
|
2016-03-15 20:33:20 -07:00
|
|
|
return update_async_texrender(source, frame, tex, texrender);
|
2014-04-23 20:57:56 -07:00
|
|
|
|
2019-04-22 23:38:26 -07:00
|
|
|
type = get_convert_type(frame->format, frame->full_range);
|
2013-10-26 14:32:06 -07:00
|
|
|
if (type == CONVERT_NONE) {
|
2019-08-09 20:43:14 -07:00
|
|
|
gs_texture_set_image(tex[0], frame->data[0], frame->linesize[0],
|
2019-06-22 22:13:45 -07:00
|
|
|
false);
|
2013-10-26 14:32:06 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-05-19 21:27:18 -07:00
|
|
|
return false;
|
2013-10-26 14:32:06 -07:00
|
|
|
}
|
|
|
|
|
2014-04-13 02:22:28 -07:00
|
|
|
static inline void obs_source_draw_texture(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
gs_effect_t *effect)
|
2013-10-25 10:25:28 -07:00
|
|
|
{
|
2019-08-09 20:43:14 -07:00
|
|
|
gs_texture_t *tex = source->async_textures[0];
|
2019-06-22 22:13:45 -07:00
|
|
|
gs_eparam_t *param;
|
2013-10-25 10:25:28 -07:00
|
|
|
|
2016-03-15 20:37:19 -07:00
|
|
|
if (source->async_texrender)
|
|
|
|
tex = gs_texrender_get_texture(source->async_texrender);
|
2014-04-23 20:57:56 -07:00
|
|
|
|
2014-08-07 23:42:07 -07:00
|
|
|
param = gs_effect_get_param_by_name(effect, "image");
|
|
|
|
gs_effect_set_texture(param, tex);
|
2013-10-25 10:25:28 -07:00
|
|
|
|
2014-04-13 02:22:28 -07:00
|
|
|
gs_draw_sprite(tex, source->async_flip ? GS_FLIP_V : 0, 0, 0);
|
|
|
|
}
|
2013-10-25 10:25:28 -07:00
|
|
|
|
2014-04-13 02:22:28 -07:00
|
|
|
static void obs_source_draw_async_texture(struct obs_source *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
gs_effect_t *effect = gs_get_effect();
|
|
|
|
bool def_draw = (!effect);
|
|
|
|
gs_technique_t *tech = NULL;
|
2014-04-13 02:22:28 -07:00
|
|
|
|
|
|
|
if (def_draw) {
|
2015-10-16 07:31:52 -07:00
|
|
|
effect = obs_get_base_effect(OBS_EFFECT_DEFAULT);
|
2019-04-02 09:03:57 -07:00
|
|
|
tech = gs_effect_get_technique(effect, "Draw");
|
2014-08-07 23:42:07 -07:00
|
|
|
gs_technique_begin(tech);
|
|
|
|
gs_technique_begin_pass(tech, 0);
|
2014-04-13 02:22:28 -07:00
|
|
|
}
|
|
|
|
|
2019-04-02 09:03:57 -07:00
|
|
|
obs_source_draw_texture(source, effect);
|
2014-04-13 02:22:28 -07:00
|
|
|
|
|
|
|
if (def_draw) {
|
2014-08-07 23:42:07 -07:00
|
|
|
gs_technique_end_pass(tech);
|
|
|
|
gs_technique_end(tech);
|
2014-04-13 02:22:28 -07:00
|
|
|
}
|
2013-10-25 10:25:28 -07:00
|
|
|
}
|
|
|
|
|
2019-09-17 12:43:47 -07:00
|
|
|
/* Replaces async texture 0 with a new dynamic texture of the same
 * dimensions but a different color format. */
static void recreate_async_texture(obs_source_t *source,
				   enum gs_color_format format)
{
	gs_texture_t *old = source->async_textures[0];
	const uint32_t cx = gs_texture_get_width(old);
	const uint32_t cy = gs_texture_get_height(old);

	gs_texture_destroy(old);
	source->async_textures[0] =
		gs_texture_create(cx, cy, format, 1, NULL, GS_DYNAMIC);
}
|
|
|
|
|
|
|
|
/* BGRX and BGRA frames can alternate on the same source; if the current
 * texture's format disagrees with the incoming frame, recreate it with the
 * matching format. */
static inline void check_to_swap_bgrx_bgra(obs_source_t *source,
					   struct obs_source_frame *frame)
{
	const enum gs_color_format tex_format =
		gs_texture_get_color_format(source->async_textures[0]);
	const enum video_format frame_format = frame->format;

	if (tex_format == GS_BGRX && frame_format == VIDEO_FORMAT_BGRA)
		recreate_async_texture(source, GS_BGRA);
	else if (tex_format == GS_BGRA && frame_format == VIDEO_FORMAT_BGRX)
		recreate_async_texture(source, GS_BGRX);
}
|
|
|
|
|
2015-10-22 17:46:54 -07:00
|
|
|
/* Once per output frame: pulls the next async frame (if any), runs it
 * through async video filters, updates timing, uploads it to the GPU, and
 * releases it.  `async_rendered` guards against doing this more than once
 * per render cycle. */
static void obs_source_update_async_video(obs_source_t *source)
{
	if (!source->async_rendered) {
		struct obs_source_frame *frame = obs_source_get_frame(source);

		/* filters may consume the frame and return NULL */
		if (frame)
			frame = filter_async_video(source, frame);

		source->async_rendered = true;
		if (frame) {
			check_to_swap_bgrx_bgra(source, frame);

			/* decoupled+unbuffered sources keep their own timing;
			 * everyone else syncs to the compositor clock */
			if (!source->async_decoupled ||
			    !source->async_unbuffered) {
				source->timing_adjust = obs->video.video_time -
							frame->timestamp;
				source->timing_set = true;
			}

			/* only re-upload when a new frame actually arrived */
			if (source->async_update_texture) {
				update_async_textures(source, frame,
						      source->async_textures,
						      source->async_texrender);
				source->async_update_texture = false;
			}

			obs_source_release_frame(source, frame);
		}
	}
}
|
2013-10-24 00:57:55 -07:00
|
|
|
|
2015-10-22 17:46:54 -07:00
|
|
|
/* Draws the async texture, but only when the source is actively producing
 * frames and a texture exists to draw. */
static inline void obs_source_render_async_video(obs_source_t *source)
{
	if (!source->async_active || !source->async_textures[0])
		return;

	obs_source_draw_async_texture(source);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Renders the source through its filter chain by rendering the head filter
 * (index 0).  A reference is taken under the filter mutex so the filter
 * cannot be destroyed mid-render; `rendering_filter` prevents re-entering
 * the chain from render_video(). */
static inline void obs_source_render_filters(obs_source_t *source)
{
	obs_source_t *head;

	pthread_mutex_lock(&source->filter_mutex);
	head = source->filters.array[0];
	obs_source_addref(head);
	pthread_mutex_unlock(&source->filter_mutex);

	source->rendering_filter = true;
	obs_source_video_render(head);
	source->rendering_filter = false;

	obs_source_release(head);
}
|
|
|
|
|
2017-05-16 17:28:00 -07:00
|
|
|
/* Renders the source with the default effect's "Draw" technique, invoking
 * the source's video_render callback once per technique pass. */
void obs_source_default_render(obs_source_t *source)
{
	gs_effect_t *effect = obs->video.default_effect;
	gs_technique_t *tech = gs_effect_get_technique(effect, "Draw");

	const size_t passes = gs_technique_begin(tech);
	for (size_t i = 0; i < passes; i++) {
		gs_technique_begin_pass(tech, i);
		/* data may be NULL if the source failed to create */
		if (source->context.data)
			source->info.video_render(source->context.data, effect);
		gs_technique_end_pass(tech);
	}
	gs_technique_end(tech);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Invokes the source's own rendering.  When the source has no parent
 * filter, no filters of its own, and does not draw itself, the default
 * effect wraps the callback; otherwise the callback is invoked directly
 * (with NULL effect for custom-draw sources). */
static inline void obs_source_main_render(obs_source_t *source)
{
	const uint32_t flags = source->info.output_flags;
	const bool custom_draw = (flags & OBS_SOURCE_CUSTOM_DRAW) != 0;
	const bool use_default_effect = !custom_draw &&
					!source->filter_parent &&
					source->filters.num == 0;

	if (use_default_effect) {
		obs_source_default_render(source);
	} else if (source->context.data) {
		source->info.video_render(source->context.data,
					  custom_draw ? NULL : gs_get_effect());
	}
}
|
|
|
|
|
2015-03-17 18:15:50 -07:00
|
|
|
static bool ready_async_frame(obs_source_t *source, uint64_t sys_time);
|
|
|
|
|
2019-04-02 23:23:37 -07:00
|
|
|
#if GS_USE_DEBUG_MARKERS
|
|
|
|
static const char *get_type_format(enum obs_source_type type)
|
|
|
|
{
|
|
|
|
switch (type) {
|
|
|
|
case OBS_SOURCE_TYPE_INPUT:
|
|
|
|
return "Input: %s";
|
|
|
|
case OBS_SOURCE_TYPE_FILTER:
|
|
|
|
return "Filter: %s";
|
|
|
|
case OBS_SOURCE_TYPE_TRANSITION:
|
|
|
|
return "Transition: %s";
|
|
|
|
case OBS_SOURCE_TYPE_SCENE:
|
|
|
|
return "Scene: %s";
|
|
|
|
default:
|
|
|
|
return "[Unknown]: %s";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2015-12-02 07:37:55 -08:00
|
|
|
/* Core per-frame render dispatch for a source.  The else-if ordering below
 * is the rendering priority and must not be reordered:
 *   1. filter chain (unless we are already inside one)
 *   2. the source's own video_render callback
 *   3. pass through to the filter target
 *   4. deinterlaced async output
 *   5. plain async texture */
static inline void render_video(obs_source_t *source)
{
	/* non-filter sources without video output render nothing; a filter
	 * parent still needs the chain to continue past us */
	if (source->info.type != OBS_SOURCE_TYPE_FILTER &&
	    (source->info.output_flags & OBS_SOURCE_VIDEO) == 0) {
		if (source->filter_parent)
			obs_source_skip_video_filter(source);
		return;
	}

	/* async inputs pull/upload their next frame before drawing (skipped
	 * while rendering the filter chain to avoid double updates) */
	if (source->info.type == OBS_SOURCE_TYPE_INPUT &&
	    (source->info.output_flags & OBS_SOURCE_ASYNC) != 0 &&
	    !source->rendering_filter) {
		if (deinterlacing_enabled(source))
			deinterlace_update_async_video(source);
		obs_source_update_async_video(source);
	}

	/* dead or disabled sources are skipped, again keeping the filter
	 * chain intact */
	if (!source->context.data || !source->enabled) {
		if (source->filter_parent)
			obs_source_skip_video_filter(source);
		return;
	}

	GS_DEBUG_MARKER_BEGIN_FORMAT(GS_DEBUG_COLOR_SOURCE,
				     get_type_format(source->info.type),
				     obs_source_get_name(source));

	if (source->filters.num && !source->rendering_filter)
		obs_source_render_filters(source);

	else if (source->info.video_render)
		obs_source_main_render(source);

	else if (source->filter_target)
		obs_source_video_render(source->filter_target);

	else if (deinterlacing_enabled(source))
		deinterlace_render(source);

	else
		obs_source_render_async_video(source);

	GS_DEBUG_MARKER_END();
}
|
|
|
|
|
2015-12-02 07:37:55 -08:00
|
|
|
/* Public entry point for rendering a source.  Holds a reference for the
 * duration of the render so the source cannot be destroyed mid-draw. */
void obs_source_video_render(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_video_render"))
		return;

	obs_source_addref(source);
	render_video(source);
	obs_source_release(source);
}
|
|
|
|
|
2015-02-25 21:02:42 -08:00
|
|
|
static uint32_t get_base_width(const obs_source_t *source)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2018-01-17 02:08:06 -08:00
|
|
|
bool is_filter = !!source->filter_parent;
|
2018-09-13 06:52:33 -07:00
|
|
|
bool func_valid = source->context.data && source->info.get_width;
|
2015-03-17 18:15:50 -07:00
|
|
|
|
libobs: Implement transition sources
Transition sources are implemented by registering a source type as
OBS_SOURCE_TYPE_TRANSITION. They're automatically marked as video
composite sources, and video_render/audio_render callbacks must be set
when registering the source. get_width and get_height callbacks are
unused for these types of sources, as transitions automatically handle
width/height behind the scenes with the transition settings.
In the video_render callback, the helper function
obs_transition_video_render is used to assist in automatically
processing and rendering the audio. A render callback is passed to the
function, which in turn passes to/from textures that are automatically
rendered in the back-end.
Similarly, in the audio_render callback, the helper function
obs_transition_audio_render is used to assist in automatically
processing and rendering the audio. Two mix callbacks are used to
handle how the source/destination sources are mixed together. To ensure
the best possible quality, audio processing is per-sample.
Transitions can be set to automatically resize, or they can be set to
have a fixed size. Sources within transitions can be made to scale to
the transition size (with or without aspect ratio), or to not scale
unless they're bigger than the transition. They can have a specific
alignment within the transition, or they just default to top-left.
These features are implemented for the purpose of extending transitions
to also act as "switch" sources later, where you can switch to/from two
different sources using the transition animation.
Planned (but not yet implemented and lower priority) features:
- "Switch" transitions which allow the ability to switch back and forth
between two sources with a transitioning animation without discarding
the references
- Easing options to allow the option to transition with a bezier or
custom curve
- Manual transitioning to allow the front-end/user to manually control
the transition offset
2016-01-03 16:41:14 -08:00
|
|
|
if (source->info.type == OBS_SOURCE_TYPE_TRANSITION) {
|
|
|
|
return source->enabled ? source->transition_actual_cx : 0;
|
|
|
|
|
2018-09-13 06:52:33 -07:00
|
|
|
} else if (func_valid && (!is_filter || source->enabled)) {
|
2014-08-04 14:38:26 -07:00
|
|
|
return source->info.get_width(source->context.data);
|
2015-02-25 21:02:42 -08:00
|
|
|
|
2018-01-17 02:08:06 -08:00
|
|
|
} else if (is_filter) {
|
2015-02-25 21:02:42 -08:00
|
|
|
return get_base_width(source->filter_target);
|
|
|
|
}
|
|
|
|
|
2015-03-07 09:03:31 -08:00
|
|
|
return source->async_active ? source->async_width : 0;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2015-02-25 21:02:42 -08:00
|
|
|
static uint32_t get_base_height(const obs_source_t *source)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2018-01-17 02:08:06 -08:00
|
|
|
bool is_filter = !!source->filter_parent;
|
2018-09-13 06:52:33 -07:00
|
|
|
bool func_valid = source->context.data && source->info.get_height;
|
2015-03-17 18:15:50 -07:00
|
|
|
|
libobs: Implement transition sources
Transition sources are implemented by registering a source type as
OBS_SOURCE_TYPE_TRANSITION. They're automatically marked as video
composite sources, and video_render/audio_render callbacks must be set
when registering the source. get_width and get_height callbacks are
unused for these types of sources, as transitions automatically handle
width/height behind the scenes with the transition settings.
In the video_render callback, the helper function
obs_transition_video_render is used to assist in automatically
processing and rendering the audio. A render callback is passed to the
function, which in turn passes to/from textures that are automatically
rendered in the back-end.
Similarly, in the audio_render callback, the helper function
obs_transition_audio_render is used to assist in automatically
processing and rendering the audio. Two mix callbacks are used to
handle how the source/destination sources are mixed together. To ensure
the best possible quality, audio processing is per-sample.
Transitions can be set to automatically resize, or they can be set to
have a fixed size. Sources within transitions can be made to scale to
the transition size (with or without aspect ratio), or to not scale
unless they're bigger than the transition. They can have a specific
alignment within the transition, or they just default to top-left.
These features are implemented for the purpose of extending transitions
to also act as "switch" sources later, where you can switch to/from two
different sources using the transition animation.
Planned (but not yet implemented and lower priority) features:
- "Switch" transitions which allow the ability to switch back and forth
between two sources with a transitioning animation without discarding
the references
- Easing options to allow the option to transition with a bezier or
custom curve
- Manual transitioning to allow the front-end/user to manually control
the transition offset
2016-01-03 16:41:14 -08:00
|
|
|
if (source->info.type == OBS_SOURCE_TYPE_TRANSITION) {
|
|
|
|
return source->enabled ? source->transition_actual_cy : 0;
|
|
|
|
|
2018-09-13 06:52:33 -07:00
|
|
|
} else if (func_valid && (!is_filter || source->enabled)) {
|
2014-08-04 14:38:26 -07:00
|
|
|
return source->info.get_height(source->context.data);
|
2015-02-25 21:02:42 -08:00
|
|
|
|
2015-03-17 18:15:50 -07:00
|
|
|
} else if (is_filter) {
|
2015-02-25 21:02:42 -08:00
|
|
|
return get_base_height(source->filter_target);
|
|
|
|
}
|
|
|
|
|
2015-03-07 09:03:31 -08:00
|
|
|
return source->async_active ? source->async_height : 0;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2015-02-25 21:02:42 -08:00
|
|
|
/* Width as seen through the filter chain: the head filter's width when
 * filters exist, otherwise the source's own base width.  The filter list
 * is read under its mutex. */
static uint32_t get_recurse_width(obs_source_t *source)
{
	uint32_t result;

	pthread_mutex_lock(&source->filter_mutex);

	if (source->filters.num)
		result = get_base_width(source->filters.array[0]);
	else
		result = get_base_width(source);

	pthread_mutex_unlock(&source->filter_mutex);

	return result;
}
|
|
|
|
|
|
|
|
/* Height as seen through the filter chain; mirrors get_recurse_width(). */
static uint32_t get_recurse_height(obs_source_t *source)
{
	uint32_t result;

	pthread_mutex_lock(&source->filter_mutex);

	if (source->filters.num)
		result = get_base_height(source->filters.array[0]);
	else
		result = get_base_height(source);

	pthread_mutex_unlock(&source->filter_mutex);

	return result;
}
|
|
|
|
|
|
|
|
/* Public width accessor: filters report their own base width; all other
 * source types report the width through their filter chain. */
uint32_t obs_source_get_width(obs_source_t *source)
{
	if (!data_valid(source, "obs_source_get_width"))
		return 0;

	if (source->info.type == OBS_SOURCE_TYPE_FILTER)
		return get_base_width(source);

	return get_recurse_width(source);
}
|
|
|
|
|
|
|
|
/* Public height accessor; mirrors obs_source_get_width(). */
uint32_t obs_source_get_height(obs_source_t *source)
{
	if (!data_valid(source, "obs_source_get_height"))
		return 0;

	if (source->info.type == OBS_SOURCE_TYPE_FILTER)
		return get_base_height(source);

	return get_recurse_height(source);
}
|
|
|
|
|
2015-03-08 06:56:41 -07:00
|
|
|
/* Public accessor for the source's width ignoring its filter chain. */
uint32_t obs_source_get_base_width(obs_source_t *source)
{
	return data_valid(source, "obs_source_get_base_width")
		       ? get_base_width(source)
		       : 0;
}
|
|
|
|
|
|
|
|
/* Public accessor for the source's height ignoring its filter chain. */
uint32_t obs_source_get_base_height(obs_source_t *source)
{
	return data_valid(source, "obs_source_get_base_height")
		       ? get_base_height(source)
		       : 0;
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
obs_source_t *obs_filter_get_parent(const obs_source_t *filter)
|
2013-12-22 00:30:18 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_ptr_valid(filter, "obs_filter_get_parent")
|
|
|
|
? filter->filter_parent
|
|
|
|
: NULL;
|
2013-12-22 00:30:18 -08:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
obs_source_t *obs_filter_get_target(const obs_source_t *filter)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_ptr_valid(filter, "obs_filter_get_target")
|
|
|
|
? filter->filter_target
|
|
|
|
: NULL;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2019-12-10 09:22:18 -08:00
|
|
|
#define OBS_SOURCE_AV (OBS_SOURCE_ASYNC_VIDEO | OBS_SOURCE_AUDIO)
|
|
|
|
|
2017-05-19 00:25:27 -07:00
|
|
|
/* Returns true if `filter` can be attached to `source`: every audio/video
 * capability the filter needs must be provided by the source.  Audio-only
 * filters do not require the source to be async. */
static bool filter_compatible(obs_source_t *source, obs_source_t *filter)
{
	const uint32_t source_caps = source->info.output_flags & OBS_SOURCE_AV;
	uint32_t filter_caps = filter->info.output_flags & OBS_SOURCE_AV;
	const bool audio_only = (filter_caps & OBS_SOURCE_AUDIO) != 0 &&
				(filter_caps & OBS_SOURCE_VIDEO) == 0;

	if (audio_only)
		filter_caps &= ~OBS_SOURCE_ASYNC;

	return (source_caps & filter_caps) == filter_caps;
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Attaches `filter` to the front of `source`'s filter chain.  Takes a
 * reference on the filter.  No-ops (with a warning for duplicates) if the
 * filter is already present or incompatible.  Emits the "filter_add"
 * signal after the list is updated and the mutex released. */
void obs_source_filter_add(obs_source_t *source, obs_source_t *filter)
{
	struct calldata cd;
	uint8_t stack[128];

	if (!obs_source_valid(source, "obs_source_filter_add"))
		return;
	if (!obs_ptr_valid(filter, "obs_source_filter_add"))
		return;

	pthread_mutex_lock(&source->filter_mutex);

	/* adding the same filter twice would corrupt the chain links */
	if (da_find(source->filters, &filter, 0) != DARRAY_INVALID) {
		blog(LOG_WARNING, "Tried to add a filter that was already "
				  "present on the source");
		pthread_mutex_unlock(&source->filter_mutex);
		return;
	}

	if (!filter_compatible(source, filter)) {
		pthread_mutex_unlock(&source->filter_mutex);
		return;
	}

	obs_source_addref(filter);

	/* new head filter renders into the previous head (or the source
	 * itself when the chain was empty) */
	filter->filter_parent = source;
	filter->filter_target = !source->filters.num ? source
						     : source->filters.array[0];

	da_insert(source->filters, 0, &filter);

	pthread_mutex_unlock(&source->filter_mutex);

	/* signal outside the lock; calldata lives on the stack */
	calldata_init_fixed(&cd, stack, sizeof(stack));
	calldata_set_ptr(&cd, "source", source);
	calldata_set_ptr(&cd, "filter", filter);

	signal_handler_signal(source->context.signals, "filter_add", &cd);

	blog(LOG_DEBUG, "- filter '%s' (%s) added to source '%s'",
	     filter->context.name, filter->info.id, source->context.name);
}
|
|
|
|
|
2015-03-17 18:23:04 -07:00
|
|
|
/* Detaches `filter` from `source`'s chain without releasing the caller's
 * reference.  Returns true if the filter was found and removed.  Emits the
 * "filter_remove" signal and calls the filter's filter_remove callback
 * (while filter_parent is still valid) after the list update. */
static bool obs_source_filter_remove_refless(obs_source_t *source,
					     obs_source_t *filter)
{
	struct calldata cd;
	uint8_t stack[128];
	size_t idx;

	pthread_mutex_lock(&source->filter_mutex);

	idx = da_find(source->filters, &filter, 0);
	if (idx == DARRAY_INVALID) {
		pthread_mutex_unlock(&source->filter_mutex);
		return false;
	}

	/* relink the previous filter around the one being removed */
	if (idx > 0) {
		obs_source_t *prev = source->filters.array[idx - 1];
		prev->filter_target = filter->filter_target;
	}

	da_erase(source->filters, idx);

	pthread_mutex_unlock(&source->filter_mutex);

	/* signal outside the lock; calldata lives on the stack */
	calldata_init_fixed(&cd, stack, sizeof(stack));
	calldata_set_ptr(&cd, "source", source);
	calldata_set_ptr(&cd, "filter", filter);

	signal_handler_signal(source->context.signals, "filter_remove", &cd);

	blog(LOG_DEBUG, "- filter '%s' (%s) removed from source '%s'",
	     filter->context.name, filter->info.id, source->context.name);

	/* notify the filter before clearing its parent link */
	if (filter->info.filter_remove)
		filter->info.filter_remove(filter->context.data,
					   filter->filter_parent);

	filter->filter_parent = NULL;
	filter->filter_target = NULL;
	return true;
}
|
2015-02-25 21:05:04 -08:00
|
|
|
|
2015-03-17 18:23:04 -07:00
|
|
|
/* Public filter removal: detaches the filter and drops the reference that
 * obs_source_filter_add() took. */
void obs_source_filter_remove(obs_source_t *source, obs_source_t *filter)
{
	if (!obs_source_valid(source, "obs_source_filter_remove") ||
	    !obs_ptr_valid(filter, "obs_source_filter_remove"))
		return;

	if (obs_source_filter_remove_refless(source, filter))
		obs_source_release(filter);
}
|
|
|
|
|
2015-03-23 19:12:25 -07:00
|
|
|
/* Finds the index of the next filter after `cur_idx` whose async-ness
 * matches that of `filter`, or DARRAY_INVALID if none exists. */
static size_t find_next_filter(obs_source_t *source, obs_source_t *filter,
			       size_t cur_idx)
{
	const bool want_async =
		(filter->info.output_flags & OBS_SOURCE_ASYNC) != 0;

	/* iterative equivalent of the tail-recursive search */
	for (size_t i = cur_idx + 1; i < source->filters.num; i++) {
		obs_source_t *candidate = source->filters.array[i];
		const bool is_async =
			(candidate->info.output_flags & OBS_SOURCE_ASYNC) != 0;

		if (is_async == want_async)
			return i;
	}

	return DARRAY_INVALID;
}
|
|
|
|
|
|
|
|
/* Finds the index of the closest filter BEFORE cur_idx whose async/sync
 * disposition (OBS_SOURCE_ASYNC output flag) matches 'filter'.
 * Returns DARRAY_INVALID when no such filter exists.
 *
 * Rewritten iteratively: the original recursed once per skipped filter,
 * consuming stack proportional to the number of filters for no benefit. */
static size_t find_prev_filter(obs_source_t *source, obs_source_t *filter,
			       size_t cur_idx)
{
	const bool cur_async =
		(filter->info.output_flags & OBS_SOURCE_ASYNC) != 0;

	for (size_t i = cur_idx; i > 0; i--) {
		obs_source_t *prev = source->filters.array[i - 1];
		const bool prev_async =
			(prev->info.output_flags & OBS_SOURCE_ASYNC) != 0;

		if (prev_async == cur_async)
			return i - 1;
	}

	return DARRAY_INVALID;
}
|
|
|
|
|
|
|
|
/* moves filters above/below matching filter types */
/* Moves 'filter' within source->filters per 'movement', then rewires every
 * filter's filter_target so the chain matches the new array order.
 * Returns false when the filter is absent or already at the limit for the
 * requested direction.  Caller must hold source->filter_mutex. */
static bool move_filter_dir(obs_source_t *source, obs_source_t *filter,
			    enum obs_order_movement movement)
{
	size_t idx;

	idx = da_find(source->filters, &filter, 0);
	if (idx == DARRAY_INVALID)
		return false;

	if (movement == OBS_ORDER_MOVE_UP) {
		/* "up" swaps with the next filter of the same async/sync
		 * kind, skipping over filters of the other kind */
		size_t next_id = find_next_filter(source, filter, idx);
		if (next_id == DARRAY_INVALID)
			return false;
		da_move_item(source->filters, idx, next_id);

	} else if (movement == OBS_ORDER_MOVE_DOWN) {
		/* "down" swaps with the previous filter of the same kind */
		size_t prev_id = find_prev_filter(source, filter, idx);
		if (prev_id == DARRAY_INVALID)
			return false;
		da_move_item(source->filters, idx, prev_id);

	} else if (movement == OBS_ORDER_MOVE_TOP) {
		if (idx == source->filters.num - 1)
			return false;
		da_move_item(source->filters, idx, source->filters.num - 1);

	} else if (movement == OBS_ORDER_MOVE_BOTTOM) {
		if (idx == 0)
			return false;
		da_move_item(source->filters, idx, 0);
	}

	/* reorder filter targets, not the nicest way of dealing with things */
	/* each filter targets its successor in the array; the last filter
	 * targets the source itself */
	for (size_t i = 0; i < source->filters.num; i++) {
		obs_source_t *next_filter =
			(i == source->filters.num - 1)
				? source
				: source->filters.array[i + 1];

		source->filters.array[i]->filter_target = next_filter;
	}

	return true;
}
|
|
|
|
|
|
|
|
/* Public API: reorders 'filter' within 'source' per 'movement' and emits
 * the "reorder_filters" signal when the move actually happened. */
void obs_source_filter_set_order(obs_source_t *source, obs_source_t *filter,
				 enum obs_order_movement movement)
{
	bool moved;

	if (!obs_source_valid(source, "obs_source_filter_set_order"))
		return;
	if (!obs_ptr_valid(filter, "obs_source_filter_set_order"))
		return;

	/* the actual move happens under the filter mutex; the signal is
	 * fired after unlocking */
	pthread_mutex_lock(&source->filter_mutex);
	moved = move_filter_dir(source, filter, movement);
	pthread_mutex_unlock(&source->filter_mutex);

	if (moved)
		obs_source_dosignal(source, NULL, "reorder_filters");
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
obs_data_t *obs_source_get_settings(const obs_source_t *source)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_source_valid(source, "obs_source_get_settings"))
|
|
|
|
return NULL;
|
2014-02-23 21:39:33 -08:00
|
|
|
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
obs_data_addref(source->context.settings);
|
|
|
|
return source->context.settings;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2016-03-15 20:26:50 -07:00
|
|
|
/* Runs an async video frame through every enabled filter that implements
 * filter_video, iterating from the end of the filter array backward.
 * Returns the (possibly replaced) frame, or NULL if a filter consumed it.
 * Takes source->filter_mutex for the duration of the pass. */
struct obs_source_frame *filter_async_video(obs_source_t *source,
					    struct obs_source_frame *in)
{
	size_t i;

	pthread_mutex_lock(&source->filter_mutex);

	/* iterate in reverse: the last filter in the array processes the
	 * frame first */
	for (i = source->filters.num; i > 0; i--) {
		struct obs_source *filter = source->filters.array[i - 1];

		if (!filter->enabled)
			continue;

		if (filter->context.data && filter->info.filter_video) {
			in = filter->info.filter_video(filter->context.data,
						       in);
			/* a filter may swallow the frame (e.g. buffering);
			 * stop the chain when it does */
			if (!in)
				break;
		}
	}

	pthread_mutex_unlock(&source->filter_mutex);

	return in;
}
|
|
|
|
|
2014-08-02 01:04:45 -07:00
|
|
|
static inline void copy_frame_data_line(struct obs_source_frame *dst,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *src,
|
|
|
|
uint32_t plane, uint32_t y)
|
2014-02-07 02:03:54 -08:00
|
|
|
{
|
2014-02-09 04:51:06 -08:00
|
|
|
uint32_t pos_src = y * src->linesize[plane];
|
|
|
|
uint32_t pos_dst = y * dst->linesize[plane];
|
2019-06-22 22:13:45 -07:00
|
|
|
uint32_t bytes = dst->linesize[plane] < src->linesize[plane]
|
|
|
|
? dst->linesize[plane]
|
|
|
|
: src->linesize[plane];
|
2014-02-07 02:03:54 -08:00
|
|
|
|
|
|
|
memcpy(dst->data[plane] + pos_dst, src->data[plane] + pos_src, bytes);
|
|
|
|
}
|
|
|
|
|
2014-08-02 01:04:45 -07:00
|
|
|
static inline void copy_frame_data_plane(struct obs_source_frame *dst,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *src,
|
|
|
|
uint32_t plane, uint32_t lines)
|
2014-02-07 02:03:54 -08:00
|
|
|
{
|
2014-02-09 04:51:06 -08:00
|
|
|
if (dst->linesize[plane] != src->linesize[plane])
|
2014-02-07 02:03:54 -08:00
|
|
|
for (uint32_t y = 0; y < lines; y++)
|
|
|
|
copy_frame_data_line(dst, src, plane, y);
|
|
|
|
else
|
|
|
|
memcpy(dst->data[plane], src->data[plane],
|
2019-06-22 22:13:45 -07:00
|
|
|
dst->linesize[plane] * lines);
|
2014-02-07 02:03:54 -08:00
|
|
|
}
|
|
|
|
|
2014-08-02 01:04:45 -07:00
|
|
|
/* Deep-copies frame metadata (flip, range, timestamp, color matrix/range)
 * and all pixel planes from src to dst, with per-format plane layout.
 * Assumes dst was allocated with matching format/dimensions. */
static void copy_frame_data(struct obs_source_frame *dst,
			    const struct obs_source_frame *src)
{
	dst->flip = src->flip;
	dst->full_range = src->full_range;
	dst->timestamp = src->timestamp;
	memcpy(dst->color_matrix, src->color_matrix, sizeof(float) * 16);
	/* min/max range vectors only matter for limited-range video */
	if (!dst->full_range) {
		size_t const size = sizeof(float) * 3;
		memcpy(dst->color_range_min, src->color_range_min, size);
		memcpy(dst->color_range_max, src->color_range_max, size);
	}

	switch (src->format) {
	/* planar 4:2:0 — chroma planes have half the rows */
	case VIDEO_FORMAT_I420:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height / 2);
		copy_frame_data_plane(dst, src, 2, dst->height / 2);
		break;

	/* semi-planar 4:2:0 — interleaved UV plane with half the rows */
	case VIDEO_FORMAT_NV12:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height / 2);
		break;

	/* planar 4:4:4 / 4:2:2 — all three planes are full height */
	case VIDEO_FORMAT_I444:
	case VIDEO_FORMAT_I422:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height);
		copy_frame_data_plane(dst, src, 2, dst->height);
		break;

	/* packed single-plane formats */
	case VIDEO_FORMAT_YVYU:
	case VIDEO_FORMAT_YUY2:
	case VIDEO_FORMAT_UYVY:
	case VIDEO_FORMAT_NONE:
	case VIDEO_FORMAT_RGBA:
	case VIDEO_FORMAT_BGRA:
	case VIDEO_FORMAT_BGRX:
	case VIDEO_FORMAT_Y800:
	case VIDEO_FORMAT_BGR3:
	case VIDEO_FORMAT_AYUV:
		copy_frame_data_plane(dst, src, 0, dst->height);
		break;

	/* planar 4:2:0 with full-height alpha plane */
	case VIDEO_FORMAT_I40A:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height / 2);
		copy_frame_data_plane(dst, src, 2, dst->height / 2);
		copy_frame_data_plane(dst, src, 3, dst->height);
		break;

	/* planar 4:2:2/4:4:4 with alpha — four full-height planes */
	case VIDEO_FORMAT_I42A:
	case VIDEO_FORMAT_YUVA:
		copy_frame_data_plane(dst, src, 0, dst->height);
		copy_frame_data_plane(dst, src, 1, dst->height);
		copy_frame_data_plane(dst, src, 2, dst->height);
		copy_frame_data_plane(dst, src, 3, dst->height);
		break;
	}
}
|
|
|
|
|
2018-06-11 00:38:00 -07:00
|
|
|
/* Public API: deep-copies 'src' into 'dst' (metadata and all planes).
 * Added a NULL guard: this is an exported entry point and the original
 * dereferenced its arguments unconditionally, crashing on NULL input. */
void obs_source_frame_copy(struct obs_source_frame *dst,
			   const struct obs_source_frame *src)
{
	if (!dst || !src)
		return;

	copy_frame_data(dst, src);
}
|
|
|
|
|
2015-01-04 00:18:36 -08:00
|
|
|
static inline bool async_texture_changed(struct obs_source *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct obs_source_frame *frame)
|
2015-01-04 00:18:36 -08:00
|
|
|
{
|
|
|
|
enum convert_type prev, cur;
|
2019-04-22 23:38:26 -07:00
|
|
|
prev = get_convert_type(source->async_cache_format,
|
2019-06-22 22:13:45 -07:00
|
|
|
source->async_cache_full_range);
|
|
|
|
cur = get_convert_type(frame->format, frame->full_range);
|
2015-01-04 00:18:36 -08:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
return source->async_cache_width != frame->width ||
|
|
|
|
source->async_cache_height != frame->height || prev != cur;
|
2015-01-04 00:18:36 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void free_async_cache(struct obs_source *source)
|
|
|
|
{
|
|
|
|
for (size_t i = 0; i < source->async_cache.num; i++)
|
2015-03-07 08:47:49 -08:00
|
|
|
obs_source_frame_decref(source->async_cache.array[i].frame);
|
2015-01-04 00:18:36 -08:00
|
|
|
|
|
|
|
da_resize(source->async_cache, 0);
|
|
|
|
da_resize(source->async_frames, 0);
|
2015-03-27 10:03:47 -07:00
|
|
|
source->cur_async_frame = NULL;
|
2016-03-15 20:39:36 -07:00
|
|
|
source->prev_async_frame = NULL;
|
2015-01-04 00:18:36 -08:00
|
|
|
}
|
|
|
|
|
2015-03-27 00:02:18 -07:00
|
|
|
#define MAX_UNUSED_FRAME_DURATION 5
|
|
|
|
|
|
|
|
/* frees frame allocations if they haven't been used for a specific period
|
|
|
|
* of time */
|
|
|
|
static void clean_cache(obs_source_t *source)
|
|
|
|
{
|
|
|
|
for (size_t i = source->async_cache.num; i > 0; i--) {
|
|
|
|
struct async_frame *af = &source->async_cache.array[i - 1];
|
|
|
|
if (!af->used) {
|
|
|
|
if (++af->unused_count == MAX_UNUSED_FRAME_DURATION) {
|
|
|
|
obs_source_frame_destroy(af->frame);
|
|
|
|
da_erase(source->async_cache, i - 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
libobs: Reset frame cache if it gets too big
When buffering is enabled for an async video source, sometimes minor
drift in timestamps or unexpected delays to frames can cause frames to
slowly buffer more and more in memory, in some cases eventually causing
the system to run out of memory.
The circumstances in which this can happen seems to depend on both the
computer and the devices in use. So far, the only known circumstances
in which this happens are with heavily buffered devices, such as
hauppauge, where decoding can sometimes take too long and cause
continual frame playback delay, and thus continual buffering until
memory runs out. I've never been able to replicate it on any of my
machines however, even after hours of testing.
This patch is a precautionary measure that puts a hard limit on the
number of async frames that can be currently queued to prevent any case
where memory might continually build for whatever reason. If it goes
over the limit, it clears the cache to reset the buffering.
I had a user with this problem test this patch with success and positive
feedback, and the intervals between buffering resets were long to where
it wasn't even noticeable while streaming/recording.
Ideally when decoding frames (such as from those devices), frame
dropping should be used to ensure playback doesn't incur extra delay,
although this sort of hard limit on the frame cache should still be
implemented regardless just as a safety precaution. For DirectShow
encoded devices I should just switch to faruton's libff for decoding and
enable the frame dropping options. It would probably explain why no
one's ever reported it for the media source, and pretty much only from
DirectShow device usage.
2015-06-04 14:04:10 -07:00
|
|
|
#define MAX_ASYNC_FRAMES 30
//if return value is not null then do (os_atomic_dec_long(&output->refs) == 0) && obs_source_frame_destroy(output)
/* Copies 'frame' into a frame borrowed from (or newly added to) the async
 * frame cache and returns it with an extra reference (see comment above
 * for the caller's release protocol).  Returns NULL when the pending
 * queue exceeded MAX_ASYNC_FRAMES and was reset as an overflow safety
 * valve.  Takes source->async_mutex; the pixel copy itself happens after
 * unlocking. */
static inline struct obs_source_frame *
cache_video(struct obs_source *source, const struct obs_source_frame *frame)
{
	struct obs_source_frame *new_frame = NULL;

	pthread_mutex_lock(&source->async_mutex);

	/* hard limit: if buffering ran away, drop everything and restart */
	if (source->async_frames.num >= MAX_ASYNC_FRAMES) {
		free_async_cache(source);
		source->last_frame_ts = 0;
		pthread_mutex_unlock(&source->async_mutex);
		return NULL;
	}

	/* a dimension/format/range change invalidates every cached frame */
	if (async_texture_changed(source, frame)) {
		free_async_cache(source);
		source->async_cache_width = frame->width;
		source->async_cache_height = frame->height;
	}

	const enum video_format format = frame->format;
	source->async_cache_format = format;
	source->async_cache_full_range = frame->full_range;

	/* reuse the first cache entry not currently in flight */
	for (size_t i = 0; i < source->async_cache.num; i++) {
		struct async_frame *af = &source->async_cache.array[i];
		if (!af->used) {
			new_frame = af->frame;
			new_frame->format = format;
			af->used = true;
			af->unused_count = 0;
			break;
		}
	}

	clean_cache(source);

	/* nothing reusable: allocate a fresh frame and add it to the cache */
	if (!new_frame) {
		struct async_frame new_af;

		new_frame = obs_source_frame_create(format, frame->width,
						    frame->height);
		new_af.frame = new_frame;
		new_af.used = true;
		new_af.unused_count = 0;
		new_frame->refs = 1;

		da_push_back(source->async_cache, &new_af);
	}

	/* extra reference handed to the caller (see comment at top) */
	os_atomic_inc_long(&new_frame->refs);

	pthread_mutex_unlock(&source->async_mutex);

	copy_frame_data(new_frame, frame);

	return new_frame;
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
static void
|
|
|
|
obs_source_output_video_internal(obs_source_t *source,
|
|
|
|
const struct obs_source_frame *frame)
|
2013-10-24 00:57:55 -07:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_source_valid(source, "obs_source_output_video"))
|
2014-02-23 21:39:33 -08:00
|
|
|
return;
|
|
|
|
|
2015-03-26 22:49:48 -07:00
|
|
|
if (!frame) {
|
|
|
|
source->async_active = false;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
struct obs_source_frame *output = !!frame ? cache_video(source, frame)
|
|
|
|
: NULL;
|
2013-10-24 00:57:55 -07:00
|
|
|
|
2015-03-07 08:47:49 -08:00
|
|
|
/* ------------------------------------------- */
|
2018-08-16 21:25:40 -07:00
|
|
|
pthread_mutex_lock(&source->async_mutex);
|
2013-10-26 14:32:06 -07:00
|
|
|
if (output) {
|
2018-08-16 21:25:40 -07:00
|
|
|
if (os_atomic_dec_long(&output->refs) == 0) {
|
|
|
|
obs_source_frame_destroy(output);
|
|
|
|
output = NULL;
|
|
|
|
} else {
|
|
|
|
da_push_back(source->async_frames, &output);
|
|
|
|
source->async_active = true;
|
|
|
|
}
|
2013-10-26 14:32:06 -07:00
|
|
|
}
|
2018-08-16 21:25:40 -07:00
|
|
|
pthread_mutex_unlock(&source->async_mutex);
|
2013-10-24 00:57:55 -07:00
|
|
|
}
|
|
|
|
|
2019-04-25 12:44:30 -07:00
|
|
|
/* Public API: outputs an async video frame.  Non-YUV formats are forced
 * to full range before being handed to the internal path. */
void obs_source_output_video(obs_source_t *source,
			     const struct obs_source_frame *frame)
{
	if (!frame) {
		obs_source_output_video_internal(source, NULL);
		return;
	}

	struct obs_source_frame adjusted = *frame;
	if (!format_is_yuv(frame->format))
		adjusted.full_range = true;

	obs_source_output_video_internal(source, &adjusted);
}
|
|
|
|
|
|
|
|
/* Public API: outputs an async video frame described by the newer
 * obs_source_frame2 layout, translating it (including the explicit
 * range enum) into an obs_source_frame for the internal path. */
void obs_source_output_video2(obs_source_t *source,
			      const struct obs_source_frame2 *frame)
{
	if (!frame) {
		obs_source_output_video_internal(source, NULL);
		return;
	}

	struct obs_source_frame converted;
	const enum video_range_type range =
		resolve_video_range(frame->format, frame->range);

	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
		converted.data[i] = frame->data[i];
		converted.linesize[i] = frame->linesize[i];
	}

	converted.width = frame->width;
	converted.height = frame->height;
	converted.timestamp = frame->timestamp;
	converted.format = frame->format;
	converted.full_range = range == VIDEO_RANGE_FULL;
	converted.flip = frame->flip;

	memcpy(&converted.color_matrix, &frame->color_matrix,
	       sizeof(frame->color_matrix));
	memcpy(&converted.color_range_min, &frame->color_range_min,
	       sizeof(frame->color_range_min));
	memcpy(&converted.color_range_max, &frame->color_range_max,
	       sizeof(frame->color_range_max));

	obs_source_output_video_internal(source, &converted);
}
|
|
|
|
|
2017-03-26 05:51:45 -07:00
|
|
|
/* Returns true when the preload frame must be (re)allocated: either no
 * preload frame exists yet or its dimensions/format differ from 'in'. */
static inline bool preload_frame_changed(obs_source_t *source,
					 const struct obs_source_frame *in)
{
	const struct obs_source_frame *cur = source->async_preload_frame;

	if (!cur)
		return true;

	return cur->width != in->width || cur->height != in->height ||
	       cur->format != in->format;
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
/* Copies 'frame' into the source's dedicated preload frame and uploads it
 * to the async textures so it can be shown immediately later (see
 * obs_source_show_preloaded_video).  Runs inside the graphics context
 * because texture (re)allocation and upload require it. */
static void
obs_source_preload_video_internal(obs_source_t *source,
				  const struct obs_source_frame *frame)
{
	if (!obs_source_valid(source, "obs_source_preload_video"))
		return;
	if (!frame)
		return;

	obs_enter_graphics();

	/* reallocate the preload frame if dimensions/format changed */
	if (preload_frame_changed(source, frame)) {
		obs_source_frame_destroy(source->async_preload_frame);
		source->async_preload_frame = obs_source_frame_create(
			frame->format, frame->width, frame->height);
	}

	copy_frame_data(source->async_preload_frame, frame);
	set_async_texture_size(source, source->async_preload_frame);
	update_async_textures(source, source->async_preload_frame,
			      source->async_textures, source->async_texrender);

	source->last_frame_ts = frame->timestamp;

	obs_leave_graphics();
}
|
|
|
|
|
2019-04-25 12:44:30 -07:00
|
|
|
/* Public API: preloads an async video frame.  Non-YUV formats are forced
 * to full range, mirroring obs_source_output_video. */
void obs_source_preload_video(obs_source_t *source,
			      const struct obs_source_frame *frame)
{
	if (!frame) {
		obs_source_preload_video_internal(source, NULL);
		return;
	}

	struct obs_source_frame adjusted = *frame;
	if (!format_is_yuv(frame->format))
		adjusted.full_range = true;

	obs_source_preload_video_internal(source, &adjusted);
}
|
|
|
|
|
|
|
|
/* Public API: preloads an async video frame given in the newer
 * obs_source_frame2 layout, translating it (including the explicit
 * range enum) into an obs_source_frame for the internal path. */
void obs_source_preload_video2(obs_source_t *source,
			       const struct obs_source_frame2 *frame)
{
	if (!frame) {
		obs_source_preload_video_internal(source, NULL);
		return;
	}

	struct obs_source_frame converted;
	const enum video_range_type range =
		resolve_video_range(frame->format, frame->range);

	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
		converted.data[i] = frame->data[i];
		converted.linesize[i] = frame->linesize[i];
	}

	converted.width = frame->width;
	converted.height = frame->height;
	converted.timestamp = frame->timestamp;
	converted.format = frame->format;
	converted.full_range = range == VIDEO_RANGE_FULL;
	converted.flip = frame->flip;

	memcpy(&converted.color_matrix, &frame->color_matrix,
	       sizeof(frame->color_matrix));
	memcpy(&converted.color_range_min, &frame->color_range_min,
	       sizeof(frame->color_range_min));
	memcpy(&converted.color_range_max, &frame->color_range_max,
	       sizeof(frame->color_range_max));

	obs_source_preload_video_internal(source, &converted);
}
|
|
|
|
|
2017-03-26 05:51:45 -07:00
|
|
|
/* Public API: activates async output using the previously preloaded frame
 * and resets the source's audio timing so audio lines up with it. */
void obs_source_show_preloaded_video(obs_source_t *source)
{
	uint64_t sys_ts;

	if (!obs_source_valid(source, "obs_source_show_preloaded_video"))
		return;

	source->async_active = true;

	pthread_mutex_lock(&source->audio_buf_mutex);
	/* monitor-only sources use a zero system timestamp; otherwise the
	 * current system time anchors the audio timing reset */
	sys_ts = (source->monitoring_type != OBS_MONITORING_TYPE_MONITOR_ONLY)
			 ? os_gettime_ns()
			 : 0;
	reset_audio_timing(source, source->last_frame_ts, sys_ts);
	reset_audio_data(source, sys_ts);
	pthread_mutex_unlock(&source->audio_buf_mutex);
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
/* Runs async audio through every enabled filter that implements
 * filter_audio, iterating from the end of the filter array backward.
 * Returns the (possibly replaced) audio, or NULL if a filter consumed it.
 * NOTE(review): unlike filter_async_video this does not take
 * filter_mutex — presumably the caller guarantees safe access to the
 * filter array here; confirm against the audio callback path. */
static inline struct obs_audio_data *
filter_async_audio(obs_source_t *source, struct obs_audio_data *in)
{
	size_t i;
	/* iterate in reverse: the last filter in the array processes the
	 * audio first */
	for (i = source->filters.num; i > 0; i--) {
		struct obs_source *filter = source->filters.array[i - 1];

		if (!filter->enabled)
			continue;

		if (filter->context.data && filter->info.filter_audio) {
			in = filter->info.filter_audio(filter->context.data,
						       in);
			/* a filter may swallow the audio (e.g. buffering) */
			if (!in)
				return NULL;
		}
	}

	return in;
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Rebuilds the source's audio resampler for the incoming audio's
 * format/rate/speaker layout.  When the incoming layout already matches
 * the global audio output, no resampler is created.  Sets audio_failed
 * when resampler creation fails. */
static inline void reset_resampler(obs_source_t *source,
				   const struct obs_source_audio *audio)
{
	const struct audio_output_info *obs_info;
	struct resample_info output_info;

	obs_info = audio_output_get_info(obs->audio.audio);

	/* target layout: whatever the global audio output uses */
	output_info.format = obs_info->format;
	output_info.samples_per_sec = obs_info->samples_per_sec;
	output_info.speakers = obs_info->speakers;

	/* remember the incoming layout on the source */
	source->sample_info.format = audio->format;
	source->sample_info.samples_per_sec = audio->samples_per_sec;
	source->sample_info.speakers = audio->speakers;

	/* tear down any previous resampler state */
	audio_resampler_destroy(source->resampler);
	source->resampler = NULL;
	source->resample_offset = 0;

	const bool layouts_match =
		source->sample_info.samples_per_sec ==
			obs_info->samples_per_sec &&
		source->sample_info.format == obs_info->format &&
		source->sample_info.speakers == obs_info->speakers;

	if (layouts_match) {
		/* no conversion needed */
		source->audio_failed = false;
		return;
	}

	source->resampler =
		audio_resampler_create(&output_info, &source->sample_info);

	source->audio_failed = source->resampler == NULL;
	if (!source->resampler)
		blog(LOG_ERROR, "creation of resampler failed");
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
/* Copies incoming audio planes into the source's cached audio buffers,
 * growing the per-plane storage first when the new data is larger than
 * the current capacity. */
static void copy_audio_data(obs_source_t *source, const uint8_t *const data[],
			    uint32_t frames, uint64_t ts)
{
	size_t plane_count = audio_output_get_planes(obs->audio.audio);
	size_t block = audio_output_get_block_size(obs->audio.audio);
	size_t bytes = (size_t)frames * block;
	bool grow = source->audio_storage_size < bytes;
	size_t plane = 0;

	source->audio_data.frames = frames;
	source->audio_data.timestamp = ts;

	while (plane < plane_count) {
		/* ensure audio storage capacity */
		if (grow) {
			bfree(source->audio_data.data[plane]);
			source->audio_data.data[plane] = bmalloc(bytes);
		}

		memcpy(source->audio_data.data[plane], data[plane], bytes);
		plane++;
	}

	if (grow)
		source->audio_storage_size = bytes;
}
|
|
|
|
|
2014-12-28 00:34:35 -08:00
|
|
|
/* TODO: SSE optimization */
|
|
|
|
static void downmix_to_mono_planar(struct obs_source *source, uint32_t frames)
|
|
|
|
{
|
2014-12-28 03:51:06 -08:00
|
|
|
size_t channels = audio_output_get_channels(obs->audio.audio);
|
2014-12-28 00:34:35 -08:00
|
|
|
const float channels_i = 1.0f / (float)channels;
|
2019-06-22 22:13:45 -07:00
|
|
|
float **data = (float **)source->audio_data.data;
|
2014-12-28 00:34:35 -08:00
|
|
|
|
2014-12-28 03:51:06 -08:00
|
|
|
for (size_t channel = 1; channel < channels; channel++) {
|
2014-12-28 00:34:35 -08:00
|
|
|
for (uint32_t frame = 0; frame < frames; frame++)
|
|
|
|
data[0][frame] += data[channel][frame];
|
|
|
|
}
|
|
|
|
|
|
|
|
for (uint32_t frame = 0; frame < frames; frame++)
|
|
|
|
data[0][frame] *= channels_i;
|
|
|
|
|
2014-12-28 03:51:06 -08:00
|
|
|
for (size_t channel = 1; channel < channels; channel++) {
|
2014-12-28 00:34:35 -08:00
|
|
|
for (uint32_t frame = 0; frame < frames; frame++)
|
|
|
|
data[channel][frame] = data[0][frame];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-08 03:15:28 -07:00
|
|
|
static void process_audio_balancing(struct obs_source *source, uint32_t frames,
|
2019-06-22 22:13:45 -07:00
|
|
|
float balance, enum obs_balance_type type)
|
2017-10-08 03:15:28 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
float **data = (float **)source->audio_data.data;
|
2017-10-08 03:15:28 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
switch (type) {
|
2017-10-08 03:15:28 -07:00
|
|
|
case OBS_BALANCE_TYPE_SINE_LAW:
|
|
|
|
for (uint32_t frame = 0; frame < frames; frame++) {
|
|
|
|
data[0][frame] = data[0][frame] *
|
2019-06-22 22:13:45 -07:00
|
|
|
sinf((1.0f - balance) * (M_PI / 2.0f));
|
|
|
|
data[1][frame] =
|
|
|
|
data[1][frame] * sinf(balance * (M_PI / 2.0f));
|
2017-10-08 03:15:28 -07:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case OBS_BALANCE_TYPE_SQUARE_LAW:
|
|
|
|
for (uint32_t frame = 0; frame < frames; frame++) {
|
|
|
|
data[0][frame] = data[0][frame] * sqrtf(1.0f - balance);
|
|
|
|
data[1][frame] = data[1][frame] * sqrtf(balance);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case OBS_BALANCE_TYPE_LINEAR:
|
|
|
|
for (uint32_t frame = 0; frame < frames; frame++) {
|
|
|
|
data[0][frame] = data[0][frame] * (1.0f - balance);
|
|
|
|
data[1][frame] = data[1][frame] * balance;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-31 10:28:47 -07:00
|
|
|
/* resamples/remixes new audio to the designated main audio output format */
|
2014-09-25 17:44:05 -07:00
|
|
|
/* resamples/remixes new audio to the designated main audio output format.
 *
 * Pipeline (order matters):
 *   1. rebuild the resampler if the incoming format changed
 *   2. resample (or copy directly) into source->audio_data
 *   3. apply stereo balance if significantly off-center
 *   4. downmix to mono if the force-mono flag is set
 *
 * On resampler failure (source->audio_failed) the audio is dropped. */
static void process_audio(obs_source_t *source,
			  const struct obs_source_audio *audio)
{
	uint32_t frames = audio->frames;
	bool mono_output;

	/* format change detected: rebuild conversion state */
	if (source->sample_info.samples_per_sec != audio->samples_per_sec ||
	    source->sample_info.format != audio->format ||
	    source->sample_info.speakers != audio->speakers)
		reset_resampler(source, audio);

	if (source->audio_failed)
		return;

	if (source->resampler) {
		uint8_t *output[MAX_AV_PLANES];

		memset(output, 0, sizeof(output));

		/* frames is updated to the resampled frame count */
		audio_resampler_resample(source->resampler, output, &frames,
					 &source->resample_offset, audio->data,
					 audio->frames);

		copy_audio_data(source, (const uint8_t *const *)output, frames,
				audio->timestamp);
	} else {
		/* formats already match: copy without conversion */
		copy_audio_data(source, audio->data, audio->frames,
				audio->timestamp);
	}

	mono_output = audio_output_get_channels(obs->audio.audio) == 1;

	/* only apply balance when meaningfully off-center (dead zone of
	 * 0.49..0.51 avoids pointless processing at the default value) */
	if (!mono_output && source->sample_info.speakers == SPEAKERS_STEREO &&
	    (source->balance > 0.51f || source->balance < 0.49f)) {
		process_audio_balancing(source, frames, source->balance,
					OBS_BALANCE_TYPE_SINE_LAW);
	}

	if (!mono_output && (source->flags & OBS_SOURCE_FLAG_FORCE_MONO) != 0)
		downmix_to_mono_planar(source, frames);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Public entry point for sources to push new audio data.
 *
 * Converts the audio to the output format (process_audio), runs it
 * through the source's async audio filters, and forwards the result to
 * the audio subsystem.  Lock order: filter_mutex, then audio_mutex. */
void obs_source_output_audio(obs_source_t *source,
			     const struct obs_source_audio *audio)
{
	struct obs_audio_data *output;

	if (!obs_source_valid(source, "obs_source_output_audio"))
		return;
	if (!obs_ptr_valid(audio, "obs_source_output_audio"))
		return;

	process_audio(source, audio);

	pthread_mutex_lock(&source->filter_mutex);
	/* may return NULL if a filter swallowed the audio */
	output = filter_async_audio(source, &source->audio_data);

	if (output) {
		struct audio_data data;

		/* shallow copy: plane pointers still reference the
		 * source's cached audio buffers */
		for (int i = 0; i < MAX_AV_PLANES; i++)
			data.data[i] = output->data[i];

		data.frames = output->frames;
		data.timestamp = output->timestamp;

		pthread_mutex_lock(&source->audio_mutex);
		source_output_audio_data(source, &data);
		pthread_mutex_unlock(&source->audio_mutex);
	}

	pthread_mutex_unlock(&source->filter_mutex);
}
|
|
|
|
|
2016-03-15 20:26:50 -07:00
|
|
|
/* Returns a frame's slot to the async frame cache by marking the
 * matching cache entry unused (the buffer itself is kept for reuse). */
void remove_async_frame(obs_source_t *source, struct obs_source_frame *frame)
{
	size_t idx;

	if (frame)
		frame->prev_frame = false;

	for (idx = 0; idx < source->async_cache.num; idx++) {
		struct async_frame *entry = &source->async_cache.array[idx];

		if (entry->frame != frame)
			continue;

		entry->used = false;
		break;
	}
}
|
|
|
|
|
2014-09-12 00:41:56 -07:00
|
|
|
/* #define DEBUG_ASYNC_FRAMES 1 */
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Decides whether the next queued async frame should be displayed at
 * sys_time, discarding stale frames along the way.
 *
 * In unbuffered mode everything but the newest frame is dropped.  In
 * buffered mode, source->last_frame_ts advances by wall-clock time and
 * queued frames older than it (beyond a 2ms tolerance) are released,
 * with compensation for timestamp discontinuities.  Returns true when a
 * frame (async_frames.array[0]) is ready to show.
 * Caller must hold the async frame lock. */
static bool ready_async_frame(obs_source_t *source, uint64_t sys_time)
{
	struct obs_source_frame *next_frame = source->async_frames.array[0];
	struct obs_source_frame *frame = NULL;
	uint64_t sys_offset = sys_time - source->last_sys_timestamp;
	uint64_t frame_time = next_frame->timestamp;
	uint64_t frame_offset = 0;

	if (source->async_unbuffered) {
		/* unbuffered: always show the most recent frame */
		while (source->async_frames.num > 1) {
			da_erase(source->async_frames, 0);
			remove_async_frame(source, next_frame);
			next_frame = source->async_frames.array[0];
		}

		source->last_frame_ts = next_frame->timestamp;
		return true;
	}

#if DEBUG_ASYNC_FRAMES
	blog(LOG_DEBUG,
	     "source->last_frame_ts: %llu, frame_time: %llu, "
	     "sys_offset: %llu, frame_offset: %llu, "
	     "number of frames: %lu",
	     source->last_frame_ts, frame_time, sys_offset,
	     frame_time - source->last_frame_ts,
	     (unsigned long)source->async_frames.num);
#endif

	/* account for timestamp invalidation */
	if (frame_out_of_bounds(source, frame_time)) {
#if DEBUG_ASYNC_FRAMES
		blog(LOG_DEBUG, "timing jump");
#endif
		source->last_frame_ts = next_frame->timestamp;
		return true;
	} else {
		frame_offset = frame_time - source->last_frame_ts;
		/* advance playback position by elapsed wall-clock time */
		source->last_frame_ts += sys_offset;
	}

	while (source->last_frame_ts > next_frame->timestamp) {

		/* this tries to reduce the needless frame duplication, also
		 * helps smooth out async rendering to frame boundaries. In
		 * other words, tries to keep the framerate as smooth as
		 * possible */
		if ((source->last_frame_ts - next_frame->timestamp) < 2000000)
			break;

		if (frame)
			da_erase(source->async_frames, 0);

#if DEBUG_ASYNC_FRAMES
		blog(LOG_DEBUG,
		     "new frame, "
		     "source->last_frame_ts: %llu, "
		     "next_frame->timestamp: %llu",
		     source->last_frame_ts, next_frame->timestamp);
#endif

		remove_async_frame(source, frame);

		/* only one frame left: show it */
		if (source->async_frames.num == 1)
			return true;

		frame = next_frame;
		next_frame = source->async_frames.array[1];

		/* more timestamp checking and compensating */
		if ((next_frame->timestamp - frame_time) > MAX_TS_VAR) {
#if DEBUG_ASYNC_FRAMES
			blog(LOG_DEBUG, "timing jump");
#endif
			source->last_frame_ts =
				next_frame->timestamp - frame_offset;
		}

		frame_time = next_frame->timestamp;
		frame_offset = frame_time - source->last_frame_ts;
	}

#if DEBUG_ASYNC_FRAMES
	if (!frame)
		blog(LOG_DEBUG, "no frame!");
#endif

	return frame != NULL;
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Pops and returns the queued async frame whose timing best matches
 * sys_time, or NULL when nothing is ready to display yet.  Initializes
 * last_frame_ts on the very first frame. */
static inline struct obs_source_frame *get_closest_frame(obs_source_t *source,
							 uint64_t sys_time)
{
	struct obs_source_frame *frame;

	if (!source->async_frames.num)
		return NULL;

	/* short-circuit: ready_async_frame() is skipped on the first frame */
	if (source->last_frame_ts && !ready_async_frame(source, sys_time))
		return NULL;

	frame = source->async_frames.array[0];
	da_erase(source->async_frames, 0);

	if (!source->last_frame_ts)
		source->last_frame_ts = frame->timestamp;

	return frame;
}
|
|
|
|
|
2013-12-07 10:23:49 -08:00
|
|
|
/*
|
2013-10-26 14:32:06 -07:00
|
|
|
* Ensures that cached frames are displayed on time. If multiple frames
|
|
|
|
* were cached between renders, then releases the unnecessary frames and uses
|
2014-01-12 01:40:51 -08:00
|
|
|
* the frame with the closest timing to ensure sync. Also ensures that timing
|
|
|
|
* with audio is synchronized.
|
2013-10-26 14:32:06 -07:00
|
|
|
*/
|
2014-09-25 17:44:05 -07:00
|
|
|
/*
 * Ensures that cached frames are displayed on time. If multiple frames
 * were cached between renders, then releases the unnecessary frames and uses
 * the frame with the closest timing to ensure sync. Also ensures that timing
 * with audio is synchronized.
 *
 * The returned frame has an extra reference; release it with
 * obs_source_release_frame().
 */
struct obs_source_frame *obs_source_get_frame(obs_source_t *source)
{
	struct obs_source_frame *frame;

	if (!obs_source_valid(source, "obs_source_get_frame"))
		return NULL;

	pthread_mutex_lock(&source->async_mutex);

	/* take ownership of the currently selected async frame */
	frame = source->cur_async_frame;
	source->cur_async_frame = NULL;
	if (frame)
		os_atomic_inc_long(&frame->refs);

	pthread_mutex_unlock(&source->async_mutex);

	return frame;
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Drops one reference to a frame obtained via obs_source_get_frame().
 * Destroys the frame when the last reference is released; with no
 * source, the frame is destroyed unconditionally. */
void obs_source_release_frame(obs_source_t *source,
			      struct obs_source_frame *frame)
{
	if (!frame)
		return;

	if (!source) {
		obs_source_frame_destroy(frame);
		return;
	}

	pthread_mutex_lock(&source->async_mutex);

	if (os_atomic_dec_long(&frame->refs) == 0)
		obs_source_frame_destroy(frame);
	else
		remove_async_frame(source, frame);

	pthread_mutex_unlock(&source->async_mutex);
}
|
2013-12-20 18:35:12 -08:00
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
const char *obs_source_get_name(const obs_source_t *source)
|
2013-12-20 18:35:12 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_name")
|
|
|
|
? source->context.name
|
|
|
|
: NULL;
|
2013-12-20 18:35:12 -08:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Renames the source and emits the "rename" signal (and the global
 * "source_rename" signal for non-private sources) with the new and
 * previous names. */
void obs_source_set_name(obs_source_t *source, const char *name)
{
	if (!obs_source_valid(source, "obs_source_set_name"))
		return;

	/* skip the rename only when both names are non-empty and equal;
	 * strcmp is reached only with both pointers non-NULL thanks to
	 * short-circuit evaluation */
	if (!name || !*name || !source->context.name ||
	    strcmp(name, source->context.name) != 0) {
		struct calldata data;
		/* bstrdup: context name is freed/replaced by setname below */
		char *prev_name = bstrdup(source->context.name);
		obs_context_data_setname(&source->context, name);

		calldata_init(&data);
		calldata_set_ptr(&data, "source", source);
		calldata_set_string(&data, "new_name", source->context.name);
		calldata_set_string(&data, "prev_name", prev_name);
		/* private sources don't emit the global signal */
		if (!source->context.private)
			signal_handler_signal(obs->signals, "source_rename",
					      &data);
		signal_handler_signal(source->context.signals, "rename", &data);
		calldata_free(&data);
		bfree(prev_name);
	}
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
enum obs_source_type obs_source_get_type(const obs_source_t *source)
|
2013-12-20 18:35:12 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_type")
|
|
|
|
? source->info.type
|
|
|
|
: OBS_SOURCE_TYPE_INPUT;
|
2014-08-02 12:42:47 -07:00
|
|
|
}
|
2014-02-23 21:39:33 -08:00
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
const char *obs_source_get_id(const obs_source_t *source)
|
2014-08-02 12:42:47 -07:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_id") ? source->info.id
|
|
|
|
: NULL;
|
2013-12-20 18:35:12 -08:00
|
|
|
}
|
2013-12-22 00:30:18 -08:00
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
static inline void render_filter_bypass(obs_source_t *target,
|
2019-06-22 22:13:45 -07:00
|
|
|
gs_effect_t *effect,
|
|
|
|
const char *tech_name)
|
2013-12-22 00:30:18 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
gs_technique_t *tech = gs_effect_get_technique(effect, tech_name);
|
|
|
|
size_t passes, i;
|
2013-12-22 00:30:18 -08:00
|
|
|
|
2014-08-07 23:42:07 -07:00
|
|
|
passes = gs_technique_begin(tech);
|
2013-12-22 00:30:18 -08:00
|
|
|
for (i = 0; i < passes; i++) {
|
2014-08-07 23:42:07 -07:00
|
|
|
gs_technique_begin_pass(tech, i);
|
2013-12-22 00:30:18 -08:00
|
|
|
obs_source_video_render(target);
|
2014-08-07 23:42:07 -07:00
|
|
|
gs_technique_end_pass(tech);
|
2013-12-22 00:30:18 -08:00
|
|
|
}
|
2014-08-07 23:42:07 -07:00
|
|
|
gs_technique_end(tech);
|
2013-12-22 00:30:18 -08:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Draws a previously rendered texture as a width x height sprite
 * through every pass of the given effect technique. */
static inline void render_filter_tex(gs_texture_t *tex, gs_effect_t *effect,
				     uint32_t width, uint32_t height,
				     const char *tech_name)
{
	gs_technique_t *tech = gs_effect_get_technique(effect, tech_name);
	gs_eparam_t *image = gs_effect_get_param_by_name(effect, "image");
	size_t pass_count;

	gs_effect_set_texture(image, tex);

	pass_count = gs_technique_begin(tech);
	for (size_t pass = 0; pass < pass_count; pass++) {
		gs_technique_begin_pass(tech, pass);
		gs_draw_sprite(tex, 0, width, height);
		gs_technique_end_pass(tech);
	}
	gs_technique_end(tech);
}
|
|
|
|
|
2015-02-26 17:47:10 -08:00
|
|
|
/* A filter may render its parent directly (no texture pass) only when
 * it is the last filter in the chain (target == parent), the caller
 * allows direct rendering, and the parent neither uses custom drawing
 * nor produces async video. */
static inline bool can_bypass(obs_source_t *target, obs_source_t *parent,
			      uint32_t parent_flags,
			      enum obs_allow_direct_render allow_direct)
{
	if (target != parent)
		return false;
	if (allow_direct != OBS_ALLOW_DIRECT_RENDERING)
		return false;
	if ((parent_flags & OBS_SOURCE_CUSTOM_DRAW) != 0)
		return false;

	return (parent_flags & OBS_SOURCE_ASYNC) == 0;
}
|
|
|
|
|
2016-04-22 10:15:50 -07:00
|
|
|
/* Begins filter processing: renders the filter's target into the
 * filter's texrender so the filter effect can sample it, unless the
 * direct-render bypass applies.  Returns true when the caller should
 * proceed to draw (via obs_source_process_filter_end/_tech_end). */
bool obs_source_process_filter_begin(obs_source_t *filter,
				     enum gs_color_format format,
				     enum obs_allow_direct_render allow_direct)
{
	obs_source_t *target, *parent;
	uint32_t parent_flags;
	int cx, cy;

	if (!obs_ptr_valid(filter, "obs_source_process_filter_begin"))
		return false;

	target = obs_filter_get_target(filter);
	parent = obs_filter_get_parent(filter);

	if (!target) {
		blog(LOG_INFO, "filter '%s' being processed with no target!",
		     filter->context.name);
		return false;
	}
	if (!parent) {
		blog(LOG_INFO, "filter '%s' being processed with no parent!",
		     filter->context.name);
		return false;
	}

	parent_flags = parent->info.output_flags;
	cx = get_base_width(target);
	cy = get_base_height(target);

	/* remembered for the matching _end call's bypass check */
	filter->allow_direct = allow_direct;

	/* if the parent does not use any custom effects, and this is the last
	 * filter in the chain for the parent, then render the parent directly
	 * using the filter effect instead of rendering to texture to reduce
	 * the total number of passes */
	if (can_bypass(target, parent, parent_flags, allow_direct)) {
		return true;
	}

	/* zero-sized target: nothing to render into the texture */
	if (!cx || !cy) {
		obs_source_skip_video_filter(filter);
		return false;
	}

	/* texrender is created lazily and reused across frames */
	if (!filter->filter_texrender)
		filter->filter_texrender =
			gs_texrender_create(format, GS_ZS_NONE);

	/* overwrite blending so the target replaces the texture contents */
	gs_blend_state_push();
	gs_blend_function(GS_BLEND_ONE, GS_BLEND_ZERO);

	if (gs_texrender_begin(filter->filter_texrender, cx, cy)) {
		bool custom_draw = (parent_flags & OBS_SOURCE_CUSTOM_DRAW) != 0;
		bool async = (parent_flags & OBS_SOURCE_ASYNC) != 0;
		struct vec4 clear_color;

		vec4_zero(&clear_color);
		gs_clear(GS_CLEAR_COLOR, &clear_color, 0.0f, 0);
		gs_ortho(0.0f, (float)cx, 0.0f, (float)cy, -100.0f, 100.0f);

		if (target == parent && !custom_draw && !async)
			obs_source_default_render(target);
		else
			obs_source_video_render(target);

		gs_texrender_end(filter->filter_texrender);
	}

	gs_blend_state_pop();
	return true;
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
void obs_source_process_filter_tech_end(obs_source_t *filter,
|
|
|
|
gs_effect_t *effect, uint32_t width,
|
|
|
|
uint32_t height, const char *tech_name)
|
2015-05-22 11:01:00 -07:00
|
|
|
{
|
|
|
|
obs_source_t *target, *parent;
|
|
|
|
gs_texture_t *texture;
|
2019-06-22 22:13:45 -07:00
|
|
|
uint32_t parent_flags;
|
2015-05-22 11:01:00 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
if (!filter)
|
|
|
|
return;
|
2015-05-22 11:01:00 -07:00
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
target = obs_filter_get_target(filter);
|
|
|
|
parent = obs_filter_get_parent(filter);
|
2016-04-09 17:40:26 -07:00
|
|
|
|
|
|
|
if (!target || !parent)
|
|
|
|
return;
|
|
|
|
|
2015-05-22 11:01:00 -07:00
|
|
|
parent_flags = parent->info.output_flags;
|
|
|
|
|
|
|
|
const char *tech = tech_name ? tech_name : "Draw";
|
|
|
|
|
|
|
|
if (can_bypass(target, parent, parent_flags, filter->allow_direct)) {
|
|
|
|
render_filter_bypass(target, effect, tech);
|
|
|
|
} else {
|
|
|
|
texture = gs_texrender_get_texture(filter->filter_texrender);
|
2019-07-22 20:50:10 -07:00
|
|
|
if (texture) {
|
|
|
|
render_filter_tex(texture, effect, width, height, tech);
|
|
|
|
}
|
2015-05-22 11:01:00 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-08 11:48:36 -07:00
|
|
|
/* Convenience wrapper around obs_source_process_filter_tech_end() that
 * uses the default "Draw" technique. */
void obs_source_process_filter_end(obs_source_t *filter, gs_effect_t *effect,
				   uint32_t width, uint32_t height)
{
	if (obs_ptr_valid(filter, "obs_source_process_filter_end"))
		obs_source_process_filter_tech_end(filter, effect, width,
						   height, "Draw");
}
|
2013-12-26 22:10:15 -08:00
|
|
|
|
2015-02-26 20:15:10 -08:00
|
|
|
/* Renders the filter's target as-is, skipping this filter's own video
 * processing.  Used by filters that want to pass video through unmodified
 * for the current frame. */
void obs_source_skip_video_filter(obs_source_t *filter)
{
	obs_source_t *target, *parent;
	bool custom_draw, async;
	uint32_t parent_flags;

	if (!obs_ptr_valid(filter, "obs_source_skip_video_filter"))
		return;

	target = obs_filter_get_target(filter);
	parent = obs_filter_get_parent(filter);

	/* Fix: guard against a detached filter.  obs_filter_get_target()/
	 * obs_filter_get_parent() can return NULL (the tech_end path checks
	 * for this); previously `parent` was dereferenced unconditionally. */
	if (!target || !parent)
		return;

	parent_flags = parent->info.output_flags;
	custom_draw = (parent_flags & OBS_SOURCE_CUSTOM_DRAW) != 0;
	async = (parent_flags & OBS_SOURCE_ASYNC) != 0;

	if (target == parent) {
		/* First filter in the chain: render the parent source itself
		 * through whichever path matches its capabilities. */
		if (!custom_draw && !async)
			obs_source_default_render(target);
		else if (target->info.video_render)
			obs_source_main_render(target);
		else if (deinterlacing_enabled(target))
			deinterlace_render(target);
		else
			obs_source_render_async_video(target);
	} else {
		/* Mid-chain: render the next filter down. */
		obs_source_video_render(target);
	}
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
signal_handler_t *obs_source_get_signal_handler(const obs_source_t *source)
|
2013-12-26 22:10:15 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_signal_handler")
|
|
|
|
? source->context.signals
|
|
|
|
: NULL;
|
2013-12-26 22:10:15 -08:00
|
|
|
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
proc_handler_t *obs_source_get_proc_handler(const obs_source_t *source)
|
2013-12-26 22:10:15 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_proc_handler")
|
|
|
|
? source->context.procs
|
|
|
|
: NULL;
|
2013-12-26 22:10:15 -08:00
|
|
|
}
|
2014-01-07 10:03:15 -08:00
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Sets the user volume of a source.  Emits the per-source "volume" signal
 * (and the global "source_volume" signal for non-private sources), allowing
 * handlers to observe or override the value before it is stored, and queues
 * a timestamped volume action for the audio thread. */
void obs_source_set_volume(obs_source_t *source, float volume)
{
	if (obs_source_valid(source, "obs_source_set_volume")) {
		/* NOTE(review): the queued action captures the volume as
		 * passed in, BEFORE signal handlers get a chance to rewrite
		 * it below — confirm this ordering is intentional. */
		struct audio_action action = {.timestamp = os_gettime_ns(),
					      .type = AUDIO_ACTION_VOL,
					      .vol = volume};

		/* Fixed-size, stack-backed calldata; avoids heap allocation
		 * on this hot signalling path. */
		struct calldata data;
		uint8_t stack[128];

		calldata_init_fixed(&data, stack, sizeof(stack));
		calldata_set_ptr(&data, "source", source);
		calldata_set_float(&data, "volume", volume);

		signal_handler_signal(source->context.signals, "volume", &data);
		if (!source->context.private)
			signal_handler_signal(obs->signals, "source_volume",
					      &data);

		/* Handlers may have replaced "volume" in the calldata. */
		volume = (float)calldata_float(&data, "volume");

		pthread_mutex_lock(&source->audio_actions_mutex);
		da_push_back(source->audio_actions, &action);
		pthread_mutex_unlock(&source->audio_actions_mutex);

		source->user_volume = volume;
	}
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
float obs_source_get_volume(const obs_source_t *source)
|
2014-01-07 10:03:15 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_volume")
|
|
|
|
? source->user_volume
|
|
|
|
: 0.0f;
|
2014-02-20 14:53:16 -08:00
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Sets the audio sync offset (in nanoseconds) of a source.  Emits the
 * "audio_sync" signal first so handlers may adjust the offset before it
 * is applied. */
void obs_source_set_sync_offset(obs_source_t *source, int64_t offset)
{
	if (!obs_source_valid(source, "obs_source_set_sync_offset"))
		return;

	struct calldata data;
	uint8_t stack[128];

	calldata_init_fixed(&data, stack, sizeof(stack));
	calldata_set_ptr(&data, "source", source);
	calldata_set_int(&data, "offset", offset);

	signal_handler_signal(source->context.signals, "audio_sync", &data);

	/* Handlers may have replaced "offset" in the calldata. */
	source->sync_offset = calldata_int(&data, "offset");
}
|
|
|
|
|
2014-09-26 15:25:59 -07:00
|
|
|
int64_t obs_source_get_sync_offset(const obs_source_t *source)
|
2014-02-20 15:16:25 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_sync_offset")
|
|
|
|
? source->sync_offset
|
|
|
|
: 0;
|
2014-01-07 10:03:15 -08:00
|
|
|
}
|
2014-02-20 16:44:42 -08:00
|
|
|
|
|
|
|
/* Bundles the user's enumeration callback and its opaque parameter so the
 * tree-walking helpers below can thread both through a single void *. */
struct source_enum_data {
	obs_source_enum_proc_t enum_callback;
	void *param;
};
|
|
|
|
|
2017-01-16 09:51:40 -08:00
|
|
|
/* Tree-walk callback for obs_source_enum_active_tree(): first descends
 * depth-first into the child's own active sub-sources (including
 * transition sources), then reports the child itself to the user's
 * callback. */
static void enum_source_active_tree_callback(obs_source_t *parent,
					     obs_source_t *child, void *param)
{
	struct source_enum_data *data = param;

	if (child->info.type == OBS_SOURCE_TYPE_TRANSITION)
		obs_transition_enum_sources(
			child, enum_source_active_tree_callback, param);

	if (child->info.enum_active_sources && child->context.data)
		child->info.enum_active_sources(
			child->context.data, enum_source_active_tree_callback,
			data);

	data->enum_callback(parent, child, data->param);
}
|
|
|
|
|
2015-12-22 04:59:02 -08:00
|
|
|
void obs_source_enum_active_sources(obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_source_enum_proc_t enum_callback,
|
|
|
|
void *param)
|
2014-02-20 16:44:42 -08:00
|
|
|
{
|
libobs: Implement transition sources
Transition sources are implemented by registering a source type as
OBS_SOURCE_TYPE_TRANSITION. They're automatically marked as video
composite sources, and video_render/audio_render callbacks must be set
when registering the source. get_width and get_height callbacks are
unused for these types of sources, as transitions automatically handle
width/height behind the scenes with the transition settings.
In the video_render callback, the helper function
obs_transition_video_render is used to assist in automatically
processing and rendering the audio. A render callback is passed to the
function, which in turn passes to/from textures that are automatically
rendered in the back-end.
Similarly, in the audio_render callback, the helper function
obs_transition_audio_render is used to assist in automatically
processing and rendering the audio. Two mix callbacks are used to
handle how the source/destination sources are mixed together. To ensure
the best possible quality, audio processing is per-sample.
Transitions can be set to automatically resize, or they can be set to
have a fixed size. Sources within transitions can be made to scale to
the transition size (with or without aspect ratio), or to not scale
unless they're bigger than the transition. They can have a specific
alignment within the transition, or they just default to top-left.
These features are implemented for the purpose of extending transitions
to also act as "switch" sources later, where you can switch to/from two
different sources using the transition animation.
Planned (but not yet implemented and lower priority) features:
- "Switch" transitions which allow the ability to switch back and forth
between two sources with a transitioning animation without discarding
the references
- Easing options to allow the option to transition with a bezier or
custom curve
- Manual transitioning to allow the front-end/user to manually control
the transition offset
2016-01-03 16:41:14 -08:00
|
|
|
bool is_transition;
|
2015-12-22 04:59:02 -08:00
|
|
|
if (!data_valid(source, "obs_source_enum_active_sources"))
|
2015-10-16 18:49:45 -07:00
|
|
|
return;
|
libobs: Implement transition sources
Transition sources are implemented by registering a source type as
OBS_SOURCE_TYPE_TRANSITION. They're automatically marked as video
composite sources, and video_render/audio_render callbacks must be set
when registering the source. get_width and get_height callbacks are
unused for these types of sources, as transitions automatically handle
width/height behind the scenes with the transition settings.
In the video_render callback, the helper function
obs_transition_video_render is used to assist in automatically
processing and rendering the audio. A render callback is passed to the
function, which in turn passes to/from textures that are automatically
rendered in the back-end.
Similarly, in the audio_render callback, the helper function
obs_transition_audio_render is used to assist in automatically
processing and rendering the audio. Two mix callbacks are used to
handle how the source/destination sources are mixed together. To ensure
the best possible quality, audio processing is per-sample.
Transitions can be set to automatically resize, or they can be set to
have a fixed size. Sources within transitions can be made to scale to
the transition size (with or without aspect ratio), or to not scale
unless they're bigger than the transition. They can have a specific
alignment within the transition, or they just default to top-left.
These features are implemented for the purpose of extending transitions
to also act as "switch" sources later, where you can switch to/from two
different sources using the transition animation.
Planned (but not yet implemented and lower priority) features:
- "Switch" transitions which allow the ability to switch back and forth
between two sources with a transitioning animation without discarding
the references
- Easing options to allow the option to transition with a bezier or
custom curve
- Manual transitioning to allow the front-end/user to manually control
the transition offset
2016-01-03 16:41:14 -08:00
|
|
|
|
|
|
|
is_transition = source->info.type == OBS_SOURCE_TYPE_TRANSITION;
|
|
|
|
if (!is_transition && !source->info.enum_active_sources)
|
2014-02-20 16:44:42 -08:00
|
|
|
return;
|
|
|
|
|
|
|
|
obs_source_addref(source);
|
|
|
|
|
libobs: Implement transition sources
Transition sources are implemented by registering a source type as
OBS_SOURCE_TYPE_TRANSITION. They're automatically marked as video
composite sources, and video_render/audio_render callbacks must be set
when registering the source. get_width and get_height callbacks are
unused for these types of sources, as transitions automatically handle
width/height behind the scenes with the transition settings.
In the video_render callback, the helper function
obs_transition_video_render is used to assist in automatically
processing and rendering the audio. A render callback is passed to the
function, which in turn passes to/from textures that are automatically
rendered in the back-end.
Similarly, in the audio_render callback, the helper function
obs_transition_audio_render is used to assist in automatically
processing and rendering the audio. Two mix callbacks are used to
handle how the source/destination sources are mixed together. To ensure
the best possible quality, audio processing is per-sample.
Transitions can be set to automatically resize, or they can be set to
have a fixed size. Sources within transitions can be made to scale to
the transition size (with or without aspect ratio), or to not scale
unless they're bigger than the transition. They can have a specific
alignment within the transition, or they just default to top-left.
These features are implemented for the purpose of extending transitions
to also act as "switch" sources later, where you can switch to/from two
different sources using the transition animation.
Planned (but not yet implemented and lower priority) features:
- "Switch" transitions which allow the ability to switch back and forth
between two sources with a transitioning animation without discarding
the references
- Easing options to allow the option to transition with a bezier or
custom curve
- Manual transitioning to allow the front-end/user to manually control
the transition offset
2016-01-03 16:41:14 -08:00
|
|
|
if (is_transition)
|
|
|
|
obs_transition_enum_sources(source, enum_callback, param);
|
|
|
|
if (source->info.enum_active_sources)
|
|
|
|
source->info.enum_active_sources(source->context.data,
|
2019-06-22 22:13:45 -07:00
|
|
|
enum_callback, param);
|
2014-02-20 16:44:42 -08:00
|
|
|
|
|
|
|
obs_source_release(source);
|
|
|
|
}
|
|
|
|
|
2015-12-22 04:59:02 -08:00
|
|
|
void obs_source_enum_active_tree(obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_source_enum_proc_t enum_callback,
|
|
|
|
void *param)
|
2014-02-20 16:44:42 -08:00
|
|
|
{
|
|
|
|
struct source_enum_data data = {enum_callback, param};
|
libobs: Implement transition sources
Transition sources are implemented by registering a source type as
OBS_SOURCE_TYPE_TRANSITION. They're automatically marked as video
composite sources, and video_render/audio_render callbacks must be set
when registering the source. get_width and get_height callbacks are
unused for these types of sources, as transitions automatically handle
width/height behind the scenes with the transition settings.
In the video_render callback, the helper function
obs_transition_video_render is used to assist in automatically
processing and rendering the audio. A render callback is passed to the
function, which in turn passes to/from textures that are automatically
rendered in the back-end.
Similarly, in the audio_render callback, the helper function
obs_transition_audio_render is used to assist in automatically
processing and rendering the audio. Two mix callbacks are used to
handle how the source/destination sources are mixed together. To ensure
the best possible quality, audio processing is per-sample.
Transitions can be set to automatically resize, or they can be set to
have a fixed size. Sources within transitions can be made to scale to
the transition size (with or without aspect ratio), or to not scale
unless they're bigger than the transition. They can have a specific
alignment within the transition, or they just default to top-left.
These features are implemented for the purpose of extending transitions
to also act as "switch" sources later, where you can switch to/from two
different sources using the transition animation.
Planned (but not yet implemented and lower priority) features:
- "Switch" transitions which allow the ability to switch back and forth
between two sources with a transitioning animation without discarding
the references
- Easing options to allow the option to transition with a bezier or
custom curve
- Manual transitioning to allow the front-end/user to manually control
the transition offset
2016-01-03 16:41:14 -08:00
|
|
|
bool is_transition;
|
2014-02-20 16:44:42 -08:00
|
|
|
|
2015-12-22 04:59:02 -08:00
|
|
|
if (!data_valid(source, "obs_source_enum_active_tree"))
|
2015-10-16 18:49:45 -07:00
|
|
|
return;
|
libobs: Implement transition sources
Transition sources are implemented by registering a source type as
OBS_SOURCE_TYPE_TRANSITION. They're automatically marked as video
composite sources, and video_render/audio_render callbacks must be set
when registering the source. get_width and get_height callbacks are
unused for these types of sources, as transitions automatically handle
width/height behind the scenes with the transition settings.
In the video_render callback, the helper function
obs_transition_video_render is used to assist in automatically
processing and rendering the audio. A render callback is passed to the
function, which in turn passes to/from textures that are automatically
rendered in the back-end.
Similarly, in the audio_render callback, the helper function
obs_transition_audio_render is used to assist in automatically
processing and rendering the audio. Two mix callbacks are used to
handle how the source/destination sources are mixed together. To ensure
the best possible quality, audio processing is per-sample.
Transitions can be set to automatically resize, or they can be set to
have a fixed size. Sources within transitions can be made to scale to
the transition size (with or without aspect ratio), or to not scale
unless they're bigger than the transition. They can have a specific
alignment within the transition, or they just default to top-left.
These features are implemented for the purpose of extending transitions
to also act as "switch" sources later, where you can switch to/from two
different sources using the transition animation.
Planned (but not yet implemented and lower priority) features:
- "Switch" transitions which allow the ability to switch back and forth
between two sources with a transitioning animation without discarding
the references
- Easing options to allow the option to transition with a bezier or
custom curve
- Manual transitioning to allow the front-end/user to manually control
the transition offset
2016-01-03 16:41:14 -08:00
|
|
|
|
|
|
|
is_transition = source->info.type == OBS_SOURCE_TYPE_TRANSITION;
|
|
|
|
if (!is_transition && !source->info.enum_active_sources)
|
2014-02-20 16:44:42 -08:00
|
|
|
return;
|
|
|
|
|
|
|
|
obs_source_addref(source);
|
|
|
|
|
libobs: Implement transition sources
Transition sources are implemented by registering a source type as
OBS_SOURCE_TYPE_TRANSITION. They're automatically marked as video
composite sources, and video_render/audio_render callbacks must be set
when registering the source. get_width and get_height callbacks are
unused for these types of sources, as transitions automatically handle
width/height behind the scenes with the transition settings.
In the video_render callback, the helper function
obs_transition_video_render is used to assist in automatically
processing and rendering the audio. A render callback is passed to the
function, which in turn passes to/from textures that are automatically
rendered in the back-end.
Similarly, in the audio_render callback, the helper function
obs_transition_audio_render is used to assist in automatically
processing and rendering the audio. Two mix callbacks are used to
handle how the source/destination sources are mixed together. To ensure
the best possible quality, audio processing is per-sample.
Transitions can be set to automatically resize, or they can be set to
have a fixed size. Sources within transitions can be made to scale to
the transition size (with or without aspect ratio), or to not scale
unless they're bigger than the transition. They can have a specific
alignment within the transition, or they just default to top-left.
These features are implemented for the purpose of extending transitions
to also act as "switch" sources later, where you can switch to/from two
different sources using the transition animation.
Planned (but not yet implemented and lower priority) features:
- "Switch" transitions which allow the ability to switch back and forth
between two sources with a transitioning animation without discarding
the references
- Easing options to allow the option to transition with a bezier or
custom curve
- Manual transitioning to allow the front-end/user to manually control
the transition offset
2016-01-03 16:41:14 -08:00
|
|
|
if (source->info.type == OBS_SOURCE_TYPE_TRANSITION)
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_transition_enum_sources(
|
|
|
|
source, enum_source_active_tree_callback, &data);
|
libobs: Implement transition sources
Transition sources are implemented by registering a source type as
OBS_SOURCE_TYPE_TRANSITION. They're automatically marked as video
composite sources, and video_render/audio_render callbacks must be set
when registering the source. get_width and get_height callbacks are
unused for these types of sources, as transitions automatically handle
width/height behind the scenes with the transition settings.
In the video_render callback, the helper function
obs_transition_video_render is used to assist in automatically
processing and rendering the audio. A render callback is passed to the
function, which in turn passes to/from textures that are automatically
rendered in the back-end.
Similarly, in the audio_render callback, the helper function
obs_transition_audio_render is used to assist in automatically
processing and rendering the audio. Two mix callbacks are used to
handle how the source/destination sources are mixed together. To ensure
the best possible quality, audio processing is per-sample.
Transitions can be set to automatically resize, or they can be set to
have a fixed size. Sources within transitions can be made to scale to
the transition size (with or without aspect ratio), or to not scale
unless they're bigger than the transition. They can have a specific
alignment within the transition, or they just default to top-left.
These features are implemented for the purpose of extending transitions
to also act as "switch" sources later, where you can switch to/from two
different sources using the transition animation.
Planned (but not yet implemented and lower priority) features:
- "Switch" transitions which allow the ability to switch back and forth
between two sources with a transitioning animation without discarding
the references
- Easing options to allow the option to transition with a bezier or
custom curve
- Manual transitioning to allow the front-end/user to manually control
the transition offset
2016-01-03 16:41:14 -08:00
|
|
|
if (source->info.enum_active_sources)
|
2019-06-22 22:13:45 -07:00
|
|
|
source->info.enum_active_sources(
|
|
|
|
source->context.data, enum_source_active_tree_callback,
|
|
|
|
&data);
|
2017-01-16 09:51:40 -08:00
|
|
|
|
|
|
|
obs_source_release(source);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Tree-walk callback for obs_source_enum_full_tree(): recursively visits
 * every child source (active or not) below `child`, preferring the
 * enum_all_sources callback when the source implements it, then reports
 * `child` itself to the user's callback. */
static void enum_source_full_tree_callback(obs_source_t *parent,
					   obs_source_t *child, void *param)
{
	struct source_enum_data *data = param;
	bool is_transition = child->info.type == OBS_SOURCE_TYPE_TRANSITION;

	if (is_transition)
		obs_transition_enum_sources(
			child, enum_source_full_tree_callback, param);
	if (child->info.enum_all_sources) {
		if (child->context.data) {
			/* Fix: call enum_all_sources here.  This branch
			 * previously invoked enum_active_sources, so a
			 * "full" enumeration silently degraded to an
			 * active-only one for sources that implement
			 * enum_all_sources. */
			child->info.enum_all_sources(
				child->context.data,
				enum_source_full_tree_callback, data);
		}
	} else if (child->info.enum_active_sources) {
		if (child->context.data) {
			child->info.enum_active_sources(
				child->context.data,
				enum_source_full_tree_callback, data);
		}
	}

	data->enum_callback(parent, child, data->param);
}
|
|
|
|
|
|
|
|
static void obs_source_enum_full_tree(obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_source_enum_proc_t enum_callback,
|
|
|
|
void *param)
|
2017-01-16 09:51:40 -08:00
|
|
|
{
|
|
|
|
struct source_enum_data data = {enum_callback, param};
|
|
|
|
bool is_transition;
|
|
|
|
|
|
|
|
if (!data_valid(source, "obs_source_enum_active_tree"))
|
|
|
|
return;
|
|
|
|
|
|
|
|
is_transition = source->info.type == OBS_SOURCE_TYPE_TRANSITION;
|
|
|
|
if (!is_transition && !source->info.enum_active_sources)
|
|
|
|
return;
|
|
|
|
|
|
|
|
obs_source_addref(source);
|
|
|
|
|
|
|
|
if (source->info.type == OBS_SOURCE_TYPE_TRANSITION)
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_transition_enum_sources(
|
|
|
|
source, enum_source_full_tree_callback, &data);
|
2017-01-16 09:51:40 -08:00
|
|
|
|
|
|
|
if (source->info.enum_all_sources) {
|
|
|
|
source->info.enum_all_sources(source->context.data,
|
2019-06-22 22:13:45 -07:00
|
|
|
enum_source_full_tree_callback,
|
|
|
|
&data);
|
2017-01-16 09:51:40 -08:00
|
|
|
|
|
|
|
} else if (source->info.enum_active_sources) {
|
|
|
|
source->info.enum_active_sources(source->context.data,
|
2019-06-22 22:13:45 -07:00
|
|
|
enum_source_full_tree_callback,
|
|
|
|
&data);
|
2017-01-16 09:51:40 -08:00
|
|
|
}
|
2014-02-20 16:44:42 -08:00
|
|
|
|
|
|
|
obs_source_release(source);
|
|
|
|
}
|
2014-02-20 21:04:14 -08:00
|
|
|
|
2014-12-27 20:21:22 -08:00
|
|
|
/* State for check_descendant(): `target` is the source being searched for;
 * `exists` is set to true if it is found anywhere in the enumerated tree. */
struct descendant_info {
	bool exists;
	obs_source_t *target;
};
|
|
|
|
|
|
|
|
/* Enumeration callback: flags when the searched-for source appears in the
 * walk, either as a visited child or as the visiting parent. */
static void check_descendant(obs_source_t *parent, obs_source_t *child,
			     void *param)
{
	struct descendant_info *info = param;

	if (info->target == child || info->target == parent)
		info->exists = true;
}
|
|
|
|
|
2015-12-22 04:59:02 -08:00
|
|
|
/* Registers `child` as an active child of `parent`, propagating the
 * parent's current show/activate state to the child.  Returns false on
 * invalid pointers, parent == child, or when adding would create a cycle
 * (parent already appears in the child's source tree). */
bool obs_source_add_active_child(obs_source_t *parent, obs_source_t *child)
{
	if (!obs_ptr_valid(parent, "obs_source_add_active_child") ||
	    !obs_ptr_valid(child, "obs_source_add_active_child"))
		return false;

	if (parent == child) {
		blog(LOG_WARNING, "obs_source_add_active_child: "
				  "parent == child");
		return false;
	}

	/* Reject cycles: the parent must not already exist anywhere in the
	 * child's full source tree. */
	struct descendant_info info = {false, parent};
	obs_source_enum_full_tree(child, check_descendant, &info);
	if (info.exists)
		return false;

	/* Propagate each of the parent's show references to the child; the
	 * first activate_refs of them count as main-view activations. */
	for (int i = 0; i < parent->show_refs; i++) {
		enum view_type type =
			(i < parent->activate_refs) ? MAIN_VIEW : AUX_VIEW;
		obs_source_activate(child, type);
	}

	return true;
}
|
|
|
|
|
2015-12-22 04:59:02 -08:00
|
|
|
/*
 * Reverses obs_source_add_active_child(): removes the show/activate
 * references that the parent previously propagated onto the child.
 */
void obs_source_remove_active_child(obs_source_t *parent, obs_source_t *child)
{
	if (!obs_ptr_valid(parent, "obs_source_remove_active_child"))
		return;
	if (!obs_ptr_valid(child, "obs_source_remove_active_child"))
		return;

	/* deactivate once per show reference, matching the view types
	 * used when the child was added */
	for (int ref = 0; ref < parent->show_refs; ref++) {
		enum view_type type = (ref < parent->activate_refs)
					      ? MAIN_VIEW
					      : AUX_VIEW;
		obs_source_deactivate(child, type);
	}
}
|
Implement volume handling
- Remove obs_source::type because it became redundant now that the
type is always stored in the obs_source::info variable.
- Apply presentation volumes of 1.0 and 0.0 to sources when they
activate/deactivate, respectively. It also applies that presentation
volume to all sub-sources, with exception of transition sources.
Transition sources must apply presentation volume manually to their
sub-sources with the new transition functions below.
- Add a "transition_volume" variable to obs_source structure, and add
three functions for handling volume for transitions:
* obs_transition_begin_frame
* obs_source_set_transition_vol
* obs_transition_end_frame
Because the to/from targets of a transition source might both contain
some of the same sources, handling the transitioning of volumes for
that specific situation becomes an issue.
So for transitions, instead of modifying the presentation volumes
directly for both sets of sources, we do this:
- First, call obs_transition_begin_frame at the beginning of each
transition frame, which will reset transition volumes for all
sub-sources to 0. Presentation volumes remain unchanged.
- Call obs_source_set_transition_vol on each sub-source, which will
then add the volume to the transition volume for each source in
that source's tree. Presentation volumes still remain unchanged.
- Then you call obs_trandition_end_frame when complete, which will
then finally set the presentation volumes to the transition
volumes.
For example, let's say that there's one source that's within both the
"transitioning from" sources and "transition to" sources. It would
add both the fade in and fade out volumes to that source, and then
when the frame is complete, it would set the presentation volume to
the sum of those two values, rather than set the presentation volume
for that same source twice which would cause weird volume jittering
and also set the wrong values.
2014-02-21 18:41:38 -08:00
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Emits the "source_save"/"save" signals for a source, then lets the
 * source type serialize its state into the source's settings via its
 * optional 'save' callback. */
void obs_source_save(obs_source_t *source)
{
	if (!data_valid(source, "obs_source_save"))
		return;

	/* signal first so handlers run before the source writes its
	 * own data (mirror image of obs_source_load's ordering) */
	obs_source_dosignal(source, "source_save", "save");

	if (!source->info.save)
		return;

	source->info.save(source->context.data, source->context.settings);
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
/* Lets the source type restore its state from the source's settings
 * via its optional 'load' callback, then emits the "source_load"/"load"
 * signals.  Note the callback runs BEFORE the signal here — the mirror
 * image of obs_source_save's ordering. */
void obs_source_load(obs_source_t *source)
{
	if (!data_valid(source, "obs_source_load"))
		return;

	if (source->info.load != NULL)
		source->info.load(source->context.data,
				  source->context.settings);

	obs_source_dosignal(source, "source_load", "load");
}
|
2014-10-23 09:56:50 -07:00
|
|
|
|
2014-12-23 17:50:20 -08:00
|
|
|
bool obs_source_active(const obs_source_t *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_active")
|
|
|
|
? source->activate_refs != 0
|
|
|
|
: false;
|
2014-12-23 17:50:20 -08:00
|
|
|
}
|
|
|
|
|
2014-12-31 15:26:02 -08:00
|
|
|
bool obs_source_showing(const obs_source_t *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_showing")
|
|
|
|
? source->show_refs != 0
|
|
|
|
: false;
|
2014-12-31 15:26:02 -08:00
|
|
|
}
|
|
|
|
|
2014-10-23 09:56:50 -07:00
|
|
|
/* Fires the "update_flags" signal carrying the source pointer and its
 * current flags value.  Uses fixed stack storage for the calldata to
 * avoid a heap allocation. */
static inline void signal_flags_updated(obs_source_t *source)
{
	uint8_t buf[128];
	struct calldata params;

	calldata_init_fixed(&params, buf, sizeof(buf));
	calldata_set_ptr(&params, "source", source);
	calldata_set_int(&params, "flags", source->flags);

	signal_handler_signal(source->context.signals, "update_flags",
			      &params);
}
|
|
|
|
|
|
|
|
/* Sets the source's flags, emitting "update_flags" only when the value
 * actually changes. */
void obs_source_set_flags(obs_source_t *source, uint32_t flags)
{
	if (!obs_source_valid(source, "obs_source_set_flags"))
		return;

	/* no-op if nothing changed: avoids a redundant signal */
	if (source->flags == flags)
		return;

	source->flags = flags;
	signal_flags_updated(source);
}
|
|
|
|
|
2014-12-31 21:58:45 -08:00
|
|
|
/* Sets the source's default flags.  Unlike obs_source_set_flags, this
 * emits no signal. */
void obs_source_set_default_flags(obs_source_t *source, uint32_t flags)
{
	if (obs_source_valid(source, "obs_source_set_default_flags"))
		source->default_flags = flags;
}
|
|
|
|
|
2014-10-23 09:56:50 -07:00
|
|
|
uint32_t obs_source_get_flags(const obs_source_t *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_flags") ? source->flags
|
|
|
|
: 0;
|
2014-10-23 09:56:50 -07:00
|
|
|
}
|
Add helper functions for drawing sources
If you look at the previous commits, you'll see I had added
obs_source_draw before. For custom drawn sources in particular, each
time obs_source_draw was called, it would restart the effect and its
passes for each draw call, which was not optimal. It should really use
the effect functions for that. I'll have to add a function to simplify
effect usage.
I also realized that including the color matrix parameters in
obs_source_draw made the function kind of messy to use; instead,
separating the color matrix stuff out to
obs_source_draw_set_color_matrix feels a lot more clean.
On top of that, having the ability to set the position would be nice to
have as well, rather than having to mess with the matrix stuff each
time, so I also added that for the sake of convenience.
obs_source_draw will draw a texture sprite, optionally of a specific
size and/or at a specific position, as well as optionally inverted. The
texture used will be set to the 'image' parameter of whatever effect is
currently active.
obs_source_draw_set_color_matrix will set the color matrix value if the
drawing requires color matrices. It will set the 'color_matrix',
'color_range_min', and 'color_range_max' parameters of whatever effect
is currently active.
Overall, these feel much more clean to use than the previous iteration.
2014-11-19 17:36:12 -08:00
|
|
|
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
/*
 * Sets the audio mixer bitmask for a source.  Emits the "audio_mixers"
 * signal first, which allows handlers to rewrite the value before it is
 * stored.  No-op for non-audio sources or when the value is unchanged.
 */
void obs_source_set_audio_mixers(obs_source_t *source, uint32_t mixers)
{
	uint8_t buf[128];
	struct calldata params;

	if (!obs_source_valid(source, "obs_source_set_audio_mixers"))
		return;
	if ((source->info.output_flags & OBS_SOURCE_AUDIO) == 0)
		return;
	if (source->audio_mixers == mixers)
		return;

	calldata_init_fixed(&params, buf, sizeof(buf));
	calldata_set_ptr(&params, "source", source);
	calldata_set_int(&params, "mixers", mixers);

	signal_handler_signal(source->context.signals, "audio_mixers",
			      &params);

	/* signal handlers may have modified the value in the calldata */
	source->audio_mixers = (uint32_t)calldata_int(&params, "mixers");
}
|
|
|
|
|
|
|
|
uint32_t obs_source_get_audio_mixers(const obs_source_t *source)
|
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_source_valid(source, "obs_source_get_audio_mixers"))
|
|
|
|
return 0;
|
|
|
|
if ((source->info.output_flags & OBS_SOURCE_AUDIO) == 0)
|
|
|
|
return 0;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
|
2015-12-17 04:03:35 -08:00
|
|
|
return source->audio_mixers;
|
(API Change) Add support for multiple audio mixers
API changed:
--------------------------
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder);
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output);
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings);
Changed to:
--------------------------
/* 'idx' specifies the track index of the output */
void obs_output_set_audio_encoder(
obs_output_t *output,
obs_encoder_t *encoder,
size_t idx);
/* 'idx' specifies the track index of the output */
obs_encoder_t *obs_output_get_audio_encoder(
const obs_output_t *output,
size_t idx);
/* 'mixer_idx' specifies the mixer index to capture audio from */
obs_encoder_t *obs_audio_encoder_create(
const char *id,
const char *name,
obs_data_t *settings,
size_t mixer_idx);
Overview
--------------------------
This feature allows multiple audio mixers to be used at a time. This
capability was able to be added with surprisingly very little extra
overhead. Audio will not be mixed unless it's assigned to a specific
mixer, and mixers will not mix unless they have an active mix
connection.
Mostly this will be useful for being able to separate out specific audio
for recording versus streaming, but will also be useful for certain
streaming services that support multiple audio streams via RTMP.
I didn't want to use a variable amount of mixers due to the desire to
reduce heap allocations, so currently I set the limit to 4 simultaneous
mixers; this number can be increased later if needed, but honestly I
feel like it's just the right number to use.
Sources:
Sources can now specify which audio mixers their audio is mixed to; this
can be a single mixer or multiple mixers at a time. The
obs_source_set_audio_mixers function sets the audio mixer which an audio
source applies to. For example, 0xF would mean that the source applies
to all four mixers.
Audio Encoders:
Audio encoders now must specify which specific audio mixer they use when
they encode audio data.
Outputs:
Outputs that use encoders can now support multiple audio tracks at once
if they have the OBS_OUTPUT_MULTI_TRACK capability flag set. This is
mostly only useful for certain types of RTMP transmissions, though may
be useful for file formats that support multiple audio tracks as well
later on.
2015-01-14 02:12:08 -08:00
|
|
|
}
|
|
|
|
|
Add helper functions for drawing sources
If you look at the previous commits, you'll see I had added
obs_source_draw before. For custom drawn sources in particular, each
time obs_source_draw was called, it would restart the effect and its
passes for each draw call, which was not optimal. It should really use
the effect functions for that. I'll have to add a function to simplify
effect usage.
I also realized that including the color matrix parameters in
obs_source_draw made the function kind of messy to use; instead,
separating the color matrix stuff out to
obs_source_draw_set_color_matrix feels a lot more clean.
On top of that, having the ability to set the position would be nice to
have as well, rather than having to mess with the matrix stuff each
time, so I also added that for the sake of convenience.
obs_source_draw will draw a texture sprite, optionally of a specific
size and/or at a specific position, as well as optionally inverted. The
texture used will be set to the 'image' parameter of whatever effect is
currently active.
obs_source_draw_set_color_matrix will set the color matrix value if the
drawing requires color matrices. It will set the 'color_matrix',
'color_range_min', and 'color_range_max' parameters of whatever effect
is currently active.
Overall, these feel much more clean to use than the previous iteration.
2014-11-19 17:36:12 -08:00
|
|
|
void obs_source_draw_set_color_matrix(const struct matrix4 *color_matrix,
|
2019-06-22 22:13:45 -07:00
|
|
|
const struct vec3 *color_range_min,
|
|
|
|
const struct vec3 *color_range_max)
|
Add helper functions for drawing sources
If you look at the previous commits, you'll see I had added
obs_source_draw before. For custom drawn sources in particular, each
time obs_source_draw was called, it would restart the effect and its
passes for each draw call, which was not optimal. It should really use
the effect functions for that. I'll have to add a function to simplify
effect usage.
I also realized that including the color matrix parameters in
obs_source_draw made the function kind of messy to use; instead,
separating the color matrix stuff out to
obs_source_draw_set_color_matrix feels a lot more clean.
On top of that, having the ability to set the position would be nice to
have as well, rather than having to mess with the matrix stuff each
time, so I also added that for the sake of convenience.
obs_source_draw will draw a texture sprite, optionally of a specific
size and/or at a specific position, as well as optionally inverted. The
texture used will be set to the 'image' parameter of whatever effect is
currently active.
obs_source_draw_set_color_matrix will set the color matrix value if the
drawing requires color matrices. It will set the 'color_matrix',
'color_range_min', and 'color_range_max' parameters of whatever effect
is currently active.
Overall, these feel much more clean to use than the previous iteration.
2014-11-19 17:36:12 -08:00
|
|
|
{
|
2014-11-26 23:29:36 -08:00
|
|
|
struct vec3 color_range_min_def;
|
|
|
|
struct vec3 color_range_max_def;
|
|
|
|
|
|
|
|
vec3_set(&color_range_min_def, 0.0f, 0.0f, 0.0f);
|
|
|
|
vec3_set(&color_range_max_def, 1.0f, 1.0f, 1.0f);
|
|
|
|
|
Add helper functions for drawing sources
If you look at the previous commits, you'll see I had added
obs_source_draw before. For custom drawn sources in particular, each
time obs_source_draw was called, it would restart the effect and its
passes for each draw call, which was not optimal. It should really use
the effect functions for that. I'll have to add a function to simplify
effect usage.
I also realized that including the color matrix parameters in
obs_source_draw made the function kind of messy to use; instead,
separating the color matrix stuff out to
obs_source_draw_set_color_matrix feels a lot more clean.
On top of that, having the ability to set the position would be nice to
have as well, rather than having to mess with the matrix stuff each
time, so I also added that for the sake of convenience.
obs_source_draw will draw a texture sprite, optionally of a specific
size and/or at a specific position, as well as optionally inverted. The
texture used will be set to the 'image' parameter of whatever effect is
currently active.
obs_source_draw_set_color_matrix will set the color matrix value if the
drawing requires color matrices. It will set the 'color_matrix',
'color_range_min', and 'color_range_max' parameters of whatever effect
is currently active.
Overall, these feel much more clean to use than the previous iteration.
2014-11-19 17:36:12 -08:00
|
|
|
gs_effect_t *effect = gs_get_effect();
|
|
|
|
gs_eparam_t *matrix;
|
|
|
|
gs_eparam_t *range_min;
|
|
|
|
gs_eparam_t *range_max;
|
|
|
|
|
|
|
|
if (!effect) {
|
2015-10-17 02:51:13 -07:00
|
|
|
blog(LOG_WARNING, "obs_source_draw_set_color_matrix: no "
|
2019-06-22 22:13:45 -07:00
|
|
|
"active effect!");
|
Add helper functions for drawing sources
If you look at the previous commits, you'll see I had added
obs_source_draw before. For custom drawn sources in particular, each
time obs_source_draw was called, it would restart the effect and its
passes for each draw call, which was not optimal. It should really use
the effect functions for that. I'll have to add a function to simplify
effect usage.
I also realized that including the color matrix parameters in
obs_source_draw made the function kind of messy to use; instead,
separating the color matrix stuff out to
obs_source_draw_set_color_matrix feels a lot more clean.
On top of that, having the ability to set the position would be nice to
have as well, rather than having to mess with the matrix stuff each
time, so I also added that for the sake of convenience.
obs_source_draw will draw a texture sprite, optionally of a specific
size and/or at a specific position, as well as optionally inverted. The
texture used will be set to the 'image' parameter of whatever effect is
currently active.
obs_source_draw_set_color_matrix will set the color matrix value if the
drawing requires color matrices. It will set the 'color_matrix',
'color_range_min', and 'color_range_max' parameters of whatever effect
is currently active.
Overall, these feel much more clean to use than the previous iteration.
2014-11-19 17:36:12 -08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_ptr_valid(color_matrix, "obs_source_draw_set_color_matrix"))
|
Add helper functions for drawing sources
If you look at the previous commits, you'll see I had added
obs_source_draw before. For custom drawn sources in particular, each
time obs_source_draw was called, it would restart the effect and its
passes for each draw call, which was not optimal. It should really use
the effect functions for that. I'll have to add a function to simplify
effect usage.
I also realized that including the color matrix parameters in
obs_source_draw made the function kind of messy to use; instead,
separating the color matrix stuff out to
obs_source_draw_set_color_matrix feels a lot more clean.
On top of that, having the ability to set the position would be nice to
have as well, rather than having to mess with the matrix stuff each
time, so I also added that for the sake of convenience.
obs_source_draw will draw a texture sprite, optionally of a specific
size and/or at a specific position, as well as optionally inverted. The
texture used will be set to the 'image' parameter of whatever effect is
currently active.
obs_source_draw_set_color_matrix will set the color matrix value if the
drawing requires color matrices. It will set the 'color_matrix',
'color_range_min', and 'color_range_max' parameters of whatever effect
is currently active.
Overall, these feel much more clean to use than the previous iteration.
2014-11-19 17:36:12 -08:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (!color_range_min)
|
|
|
|
color_range_min = &color_range_min_def;
|
|
|
|
if (!color_range_max)
|
|
|
|
color_range_max = &color_range_max_def;
|
|
|
|
|
|
|
|
matrix = gs_effect_get_param_by_name(effect, "color_matrix");
|
|
|
|
range_min = gs_effect_get_param_by_name(effect, "color_range_min");
|
|
|
|
range_max = gs_effect_get_param_by_name(effect, "color_range_max");
|
|
|
|
|
|
|
|
gs_effect_set_matrix4(matrix, color_matrix);
|
2019-06-22 22:13:45 -07:00
|
|
|
gs_effect_set_val(range_min, color_range_min, sizeof(float) * 3);
|
|
|
|
gs_effect_set_val(range_max, color_range_max, sizeof(float) * 3);
|
Add helper functions for drawing sources
If you look at the previous commits, you'll see I had added
obs_source_draw before. For custom drawn sources in particular, each
time obs_source_draw was called, it would restart the effect and its
passes for each draw call, which was not optimal. It should really use
the effect functions for that. I'll have to add a function to simplify
effect usage.
I also realized that including the color matrix parameters in
obs_source_draw made the function kind of messy to use; instead,
separating the color matrix stuff out to
obs_source_draw_set_color_matrix feels a lot more clean.
On top of that, having the ability to set the position would be nice to
have as well, rather than having to mess with the matrix stuff each
time, so I also added that for the sake of convenience.
obs_source_draw will draw a texture sprite, optionally of a specific
size and/or at a specific position, as well as optionally inverted. The
texture used will be set to the 'image' parameter of whatever effect is
currently active.
obs_source_draw_set_color_matrix will set the color matrix value if the
drawing requires color matrices. It will set the 'color_matrix',
'color_range_min', and 'color_range_max' parameters of whatever effect
is currently active.
Overall, these feel much more clean to use than the previous iteration.
2014-11-19 17:36:12 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Draws a texture sprite with the currently active effect.
 *
 * Assigns 'texture' to the effect's 'image' parameter and draws a
 * cx-by-cy sprite, optionally vertically flipped.  If (x, y) is
 * nonzero, the draw is translated by that offset with the matrix
 * pushed/popped around the call.  Logs a warning and does nothing if
 * no effect is currently active. */
void obs_source_draw(gs_texture_t *texture, int x, int y, uint32_t cx,
		     uint32_t cy, bool flip)
{
	gs_effect_t *effect = gs_get_effect();
	if (!effect) {
		blog(LOG_WARNING, "obs_source_draw: no active effect!");
		return;
	}

	if (!obs_ptr_valid(texture, "obs_source_draw"))
		return;

	gs_eparam_t *image = gs_effect_get_param_by_name(effect, "image");
	gs_effect_set_texture(image, texture);

	const bool change_pos = (x != 0 || y != 0);
	if (change_pos) {
		gs_matrix_push();
		gs_matrix_translate3f((float)x, (float)y, 0.0f);
	}

	gs_draw_sprite(texture, flip ? GS_FLIP_V : 0, cx, cy);

	if (change_pos)
		gs_matrix_pop();
}
|
libobs: Refactor source volume transition design
This changes the way source volume handles transitioning between being
active and inactive states.
The previous way that transitioning handled volume was that it set the
presentation volume of the source and all of its sub-sources to 0.0 if
the source was inactive, and 1.0 if active. Transition sources would
then also set the presentation volume for sub-sources to whatever their
transitioning volume was. However, the problem with this is that the
design didn't take in to account if the source or its sub-sources were
active anywhere else, so because of that it would break if that ever
happened, and I didn't realize that when I was designing it.
So instead, this completely overhauls the design of handling
transitioning volume. Each frame, it'll go through all sources and
check whether they're active or inactive and set the base volume
accordingly. If transitions are currently active, it will actually walk
the active source tree and check whether the source is in a
transitioning state somewhere.
- If the source is a sub-source of a transition, and it's not active
outside of the transition, then the transition will control the
volume of the source.
- If the source is a sub-source of a transition, but it's also active
outside of the transition, it'll defer to whichever is louder.
This also adds a new callback to the obs_source_info structure for
transition sources, get_transition_volume, which is called to get the
transitioning volume of a sub-source.
2014-12-27 22:16:10 -08:00
|
|
|
|
2015-03-02 00:44:42 -08:00
|
|
|
/* Increments the source's "showing" refcount via the auxiliary view
 * (show/hide state only; does not mark the source active). */
void obs_source_inc_showing(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_inc_showing"))
		return;

	obs_source_activate(source, AUX_VIEW);
}
|
|
|
|
|
2019-05-11 21:23:17 -07:00
|
|
|
/* Increments the source's "active" refcount via the main view. */
void obs_source_inc_active(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_inc_active"))
		return;

	obs_source_activate(source, MAIN_VIEW);
}
|
|
|
|
|
2015-03-02 00:44:42 -08:00
|
|
|
/* Decrements the source's "showing" refcount via the auxiliary view. */
void obs_source_dec_showing(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_dec_showing"))
		return;

	obs_source_deactivate(source, AUX_VIEW);
}
|
2015-02-25 21:14:11 -08:00
|
|
|
|
2019-05-11 21:23:17 -07:00
|
|
|
/* Decrements the source's "active" refcount via the main view. */
void obs_source_dec_active(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_dec_active"))
		return;

	obs_source_deactivate(source, MAIN_VIEW);
}
|
|
|
|
|
2015-02-25 21:14:11 -08:00
|
|
|
void obs_source_enum_filters(obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_source_enum_proc_t callback, void *param)
|
2015-02-25 21:14:11 -08:00
|
|
|
{
|
2015-10-17 02:51:13 -07:00
|
|
|
if (!obs_source_valid(source, "obs_source_enum_filters"))
|
|
|
|
return;
|
|
|
|
if (!obs_ptr_valid(callback, "obs_source_enum_filters"))
|
2015-02-25 21:14:11 -08:00
|
|
|
return;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&source->filter_mutex);
|
|
|
|
|
2015-03-14 07:02:23 -07:00
|
|
|
for (size_t i = source->filters.num; i > 0; i--) {
|
|
|
|
struct obs_source *filter = source->filters.array[i - 1];
|
2015-02-25 21:14:11 -08:00
|
|
|
callback(source, filter, param);
|
|
|
|
}
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&source->filter_mutex);
|
|
|
|
}
|
2015-02-25 21:14:57 -08:00
|
|
|
|
|
|
|
/* Finds an attached filter by its context name.
 *
 * Returns the first filter whose name matches exactly, with an added
 * reference (caller must release it), or NULL if not found or on
 * invalid arguments. */
obs_source_t *obs_source_get_filter_by_name(obs_source_t *source,
					    const char *name)
{
	if (!obs_source_valid(source, "obs_source_get_filter_by_name"))
		return NULL;
	if (!obs_ptr_valid(name, "obs_source_get_filter_by_name"))
		return NULL;

	obs_source_t *filter = NULL;

	pthread_mutex_lock(&source->filter_mutex);

	for (size_t i = 0; i < source->filters.num; i++) {
		struct obs_source *cur_filter = source->filters.array[i];

		if (strcmp(cur_filter->context.name, name) != 0)
			continue;

		/* found: hand the caller a strong reference */
		filter = cur_filter;
		obs_source_addref(filter);
		break;
	}

	pthread_mutex_unlock(&source->filter_mutex);

	return filter;
}
|
2015-03-17 18:15:50 -07:00
|
|
|
|
|
|
|
bool obs_source_enabled(const obs_source_t *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_enabled") ? source->enabled
|
|
|
|
: false;
|
2015-03-17 18:15:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Sets the enabled state of the source and emits the "enable" signal
 * with 'source' and 'enabled' parameters. */
void obs_source_set_enabled(obs_source_t *source, bool enabled)
{
	if (!obs_source_valid(source, "obs_source_set_enabled"))
		return;

	source->enabled = enabled;

	/* fixed-size calldata on the stack: no heap allocation needed
	 * for this small signal payload */
	uint8_t stack[128];
	struct calldata data;
	calldata_init_fixed(&data, stack, sizeof(stack));
	calldata_set_ptr(&data, "source", source);
	calldata_set_bool(&data, "enabled", enabled);

	signal_handler_signal(source->context.signals, "enable", &data);
}
|
2015-03-22 14:54:07 -07:00
|
|
|
|
|
|
|
bool obs_source_muted(const obs_source_t *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_muted") ? source->user_muted
|
|
|
|
: false;
|
2015-03-22 14:54:07 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Sets the user mute state of the source.
 *
 * Emits the "mute" signal and queues a timestamped AUDIO_ACTION_MUTE
 * action so the audio thread applies the change at the right point in
 * the audio timeline. */
void obs_source_set_muted(obs_source_t *source, bool muted)
{
	struct audio_action action = {.timestamp = os_gettime_ns(),
				      .type = AUDIO_ACTION_MUTE,
				      .set = muted};

	if (!obs_source_valid(source, "obs_source_set_muted"))
		return;

	source->user_muted = muted;

	/* signal listeners with a stack-backed calldata */
	uint8_t stack[128];
	struct calldata data;
	calldata_init_fixed(&data, stack, sizeof(stack));
	calldata_set_ptr(&data, "source", source);
	calldata_set_bool(&data, "muted", muted);

	signal_handler_signal(source->context.signals, "mute", &data);

	/* hand the mute action off to the audio subsystem */
	pthread_mutex_lock(&source->audio_actions_mutex);
	da_push_back(source->audio_actions, &action);
	pthread_mutex_unlock(&source->audio_actions_mutex);
}
|
2015-04-30 18:22:12 -07:00
|
|
|
|
|
|
|
/* Emits a push-to-mute/push-to-talk enabled-state change signal with
 * 'source' and 'enabled' parameters. */
static void source_signal_push_to_changed(obs_source_t *source,
					  const char *signal, bool enabled)
{
	uint8_t stack[128];
	struct calldata data;

	calldata_init_fixed(&data, stack, sizeof(stack));
	calldata_set_ptr(&data, "source", source);
	calldata_set_bool(&data, "enabled", enabled);

	signal_handler_signal(source->context.signals, signal, &data);
}
|
|
|
|
|
|
|
|
/* Emits a push-to-mute/push-to-talk delay change signal with 'source'
 * and 'delay' (milliseconds, integer) parameters. */
static void source_signal_push_to_delay(obs_source_t *source,
					const char *signal, uint64_t delay)
{
	struct calldata data;
	uint8_t stack[128];

	calldata_init_fixed(&data, stack, sizeof(stack));
	calldata_set_ptr(&data, "source", source);
	/* FIX: 'delay' is a uint64_t quantity, not a flag.  The previous
	 * calldata_set_bool() collapsed it to true/false, so handlers
	 * reading "delay" as an integer received a meaningless value. */
	calldata_set_int(&data, "delay", (long long)delay);

	signal_handler_signal(source->context.signals, signal, &data);
}
|
|
|
|
|
|
|
|
/* Returns whether push-to-mute is enabled (read under the audio mutex);
 * false for an invalid source. */
bool obs_source_push_to_mute_enabled(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_push_to_mute_enabled"))
		return false;

	pthread_mutex_lock(&source->audio_mutex);
	bool enabled = source->push_to_mute_enabled;
	pthread_mutex_unlock(&source->audio_mutex);

	return enabled;
}
|
|
|
|
|
|
|
|
/* Enables or disables push-to-mute for the source.
 *
 * Logs the change for audio sources and emits "push_to_mute_changed"
 * when the state actually changes; all under the audio mutex. */
void obs_source_enable_push_to_mute(obs_source_t *source, bool enabled)
{
	if (!obs_source_valid(source, "obs_source_enable_push_to_mute"))
		return;

	pthread_mutex_lock(&source->audio_mutex);

	bool changed = source->push_to_mute_enabled != enabled;
	if (obs_source_get_output_flags(source) & OBS_SOURCE_AUDIO && changed)
		blog(LOG_INFO, "source '%s' %s push-to-mute",
		     obs_source_get_name(source),
		     enabled ? "enabled" : "disabled");

	source->push_to_mute_enabled = enabled;

	if (changed)
		source_signal_push_to_changed(source, "push_to_mute_changed",
					      enabled);

	pthread_mutex_unlock(&source->audio_mutex);
}
|
|
|
|
|
|
|
|
/* Returns the push-to-mute delay (read under the audio mutex);
 * 0 for an invalid source. */
uint64_t obs_source_get_push_to_mute_delay(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_get_push_to_mute_delay"))
		return 0;

	pthread_mutex_lock(&source->audio_mutex);
	uint64_t delay = source->push_to_mute_delay;
	pthread_mutex_unlock(&source->audio_mutex);

	return delay;
}
|
|
|
|
|
|
|
|
/* Sets the push-to-mute delay under the audio mutex and emits the
 * "push_to_mute_delay" signal. */
void obs_source_set_push_to_mute_delay(obs_source_t *source, uint64_t delay)
{
	if (!obs_source_valid(source, "obs_source_set_push_to_mute_delay"))
		return;

	pthread_mutex_lock(&source->audio_mutex);

	source->push_to_mute_delay = delay;
	source_signal_push_to_delay(source, "push_to_mute_delay", delay);

	pthread_mutex_unlock(&source->audio_mutex);
}
|
|
|
|
|
|
|
|
/* Returns whether push-to-talk is enabled (read under the audio mutex);
 * false for an invalid source. */
bool obs_source_push_to_talk_enabled(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_push_to_talk_enabled"))
		return false;

	pthread_mutex_lock(&source->audio_mutex);
	bool enabled = source->push_to_talk_enabled;
	pthread_mutex_unlock(&source->audio_mutex);

	return enabled;
}
|
|
|
|
|
|
|
|
/* Enables or disables push-to-talk for the source.
 *
 * Logs the change for audio sources and emits "push_to_talk_changed"
 * when the state actually changes; all under the audio mutex. */
void obs_source_enable_push_to_talk(obs_source_t *source, bool enabled)
{
	if (!obs_source_valid(source, "obs_source_enable_push_to_talk"))
		return;

	pthread_mutex_lock(&source->audio_mutex);

	bool changed = source->push_to_talk_enabled != enabled;
	if (obs_source_get_output_flags(source) & OBS_SOURCE_AUDIO && changed)
		blog(LOG_INFO, "source '%s' %s push-to-talk",
		     obs_source_get_name(source),
		     enabled ? "enabled" : "disabled");

	source->push_to_talk_enabled = enabled;

	if (changed)
		source_signal_push_to_changed(source, "push_to_talk_changed",
					      enabled);

	pthread_mutex_unlock(&source->audio_mutex);
}
|
|
|
|
|
|
|
|
/* Returns the push-to-talk delay (read under the audio mutex);
 * 0 for an invalid source. */
uint64_t obs_source_get_push_to_talk_delay(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_get_push_to_talk_delay"))
		return 0;

	pthread_mutex_lock(&source->audio_mutex);
	uint64_t delay = source->push_to_talk_delay;
	pthread_mutex_unlock(&source->audio_mutex);

	return delay;
}
|
|
|
|
|
|
|
|
/* Sets the push-to-talk delay under the audio mutex and emits the
 * "push_to_talk_delay" signal. */
void obs_source_set_push_to_talk_delay(obs_source_t *source, uint64_t delay)
{
	if (!obs_source_valid(source, "obs_source_set_push_to_talk_delay"))
		return;

	pthread_mutex_lock(&source->audio_mutex);

	source->push_to_talk_delay = delay;
	source_signal_push_to_delay(source, "push_to_talk_delay", delay);

	pthread_mutex_unlock(&source->audio_mutex);
}
|
2015-09-15 22:51:37 -07:00
|
|
|
|
|
|
|
/* Returns the type-specific data registered with the source's
 * obs_source_info; NULL for an invalid source. */
void *obs_source_get_type_data(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_get_type_data"))
		return NULL;

	return source->info.type_data;
}
|
2015-12-17 04:28:35 -08:00
|
|
|
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
static float get_source_volume(obs_source_t *source, uint64_t os_time)
|
|
|
|
{
|
|
|
|
if (source->push_to_mute_enabled && source->push_to_mute_pressed)
|
2019-06-22 22:13:45 -07:00
|
|
|
source->push_to_mute_stop_time =
|
|
|
|
os_time + source->push_to_mute_delay * 1000000;
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
|
|
|
|
if (source->push_to_talk_enabled && source->push_to_talk_pressed)
|
2019-06-22 22:13:45 -07:00
|
|
|
source->push_to_talk_stop_time =
|
|
|
|
os_time + source->push_to_talk_delay * 1000000;
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
|
|
|
|
bool push_to_mute_active = source->push_to_mute_pressed ||
|
2019-06-22 22:13:45 -07:00
|
|
|
os_time < source->push_to_mute_stop_time;
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
bool push_to_talk_active = source->push_to_talk_pressed ||
|
2019-06-22 22:13:45 -07:00
|
|
|
os_time < source->push_to_talk_stop_time;
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
|
|
|
|
bool muted = !source->enabled || source->muted ||
|
2019-06-22 22:13:45 -07:00
|
|
|
(source->push_to_mute_enabled && push_to_mute_active) ||
|
|
|
|
(source->push_to_talk_enabled && !push_to_talk_active);
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
|
|
|
|
if (muted || close_float(source->volume, 0.0f, 0.0001f))
|
|
|
|
return 0.0f;
|
|
|
|
if (close_float(source->volume, 1.0f, 0.0001f))
|
|
|
|
return 1.0f;
|
|
|
|
|
|
|
|
return source->volume;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Scales one mix's output audio by a constant volume factor.
 *
 * Walks AUDIO_OUTPUT_FRAMES * channels float samples starting at the
 * first channel plane of the given mix; the original code relies on the
 * channel planes of a mix being laid out contiguously in memory.
 */
static inline void multiply_output_audio(obs_source_t *source, size_t mix,
		size_t channels, float vol)
{
	float *samples = source->audio_output_buf[mix][0];
	const size_t total = AUDIO_OUTPUT_FRAMES * channels;

	for (size_t i = 0; i < total; i++)
		samples[i] *= vol;
}
|
|
|
|
|
|
|
|
/* Applies a per-frame volume ramp to one mix's output audio.
 *
 * vol_data holds AUDIO_OUTPUT_FRAMES gain values, one per frame; the
 * same ramp is applied to every channel plane of the mix.
 */
static inline void multiply_vol_data(obs_source_t *source, size_t mix,
		size_t channels, float *vol_data)
{
	for (size_t ch = 0; ch < channels; ch++) {
		float *plane = source->audio_output_buf[mix][ch];

		for (size_t frame = 0; frame < AUDIO_OUTPUT_FRAMES; frame++)
			plane[frame] *= vol_data[frame];
	}
}
|
|
|
|
|
|
|
|
/* Applies a single queued audio state change (volume, mute,
 * push-to-talk, or push-to-mute) to the source. */
static inline void apply_audio_action(obs_source_t *source,
		const struct audio_action *action)
{
	if (action->type == AUDIO_ACTION_VOL)
		source->volume = action->vol;
	else if (action->type == AUDIO_ACTION_MUTE)
		source->muted = action->set;
	else if (action->type == AUDIO_ACTION_PTT)
		source->push_to_talk_pressed = action->set;
	else if (action->type == AUDIO_ACTION_PTM)
		source->push_to_mute_pressed = action->set;
}
|
|
|
|
|
|
|
|
/* Consumes all queued audio actions that land within the current audio
 * tick, building a per-frame volume ramp and multiplying it into every
 * mix this source outputs to.
 *
 * Actions whose timestamps fall beyond this tick's window of
 * AUDIO_OUTPUT_FRAMES frames are left queued for a later tick.  Actions
 * are erased from the queue as they are applied.  The actions mutex is
 * held while the queue is scanned and the ramp is built.
 */
static void apply_audio_actions(obs_source_t *source, size_t channels,
		size_t sample_rate)
{
	float *vol_data = malloc(sizeof(float) * AUDIO_OUTPUT_FRAMES);
	float cur_vol = get_source_volume(source, source->audio_ts);
	size_t frame_num = 0;

	/* Fix: the allocation was previously used unchecked; on failure,
	 * bail out instead of dereferencing NULL (CERT MEM32-C). */
	if (!vol_data)
		return;

	pthread_mutex_lock(&source->audio_actions_mutex);

	for (size_t i = 0; i < source->audio_actions.num; i++) {
		struct audio_action action = source->audio_actions.array[i];
		uint64_t timestamp = action.timestamp;
		size_t new_frame_num;

		/* clamp stale timestamps to the start of this tick */
		if (timestamp < source->audio_ts)
			timestamp = source->audio_ts;

		new_frame_num = conv_time_to_frames(
			sample_rate, timestamp - source->audio_ts);

		/* actions beyond this tick stay queued for later */
		if (new_frame_num >= AUDIO_OUTPUT_FRAMES)
			break;

		da_erase(source->audio_actions, i--);

		apply_audio_action(source, &action);

		/* fill the ramp up to this action with the prior volume */
		if (new_frame_num > frame_num) {
			for (; frame_num < new_frame_num; frame_num++)
				vol_data[frame_num] = cur_vol;
		}

		cur_vol = get_source_volume(source, timestamp);
	}

	/* remaining frames of the tick use the final volume */
	for (; frame_num < AUDIO_OUTPUT_FRAMES; frame_num++)
		vol_data[frame_num] = cur_vol;

	pthread_mutex_unlock(&source->audio_actions_mutex);

	for (size_t mix = 0; mix < MAX_AUDIO_MIXES; mix++) {
		if ((source->audio_mixers & (1 << mix)) != 0)
			multiply_vol_data(source, mix, channels, vol_data);
	}

	free(vol_data);
}
|
|
|
|
|
|
|
|
/* Applies the source's volume to its audio output buffers for this
 * audio tick, dispatching to the per-frame ramp path when a queued
 * audio action falls within the tick's window.
 *
 * mixers is the bitmask of mixes currently active on the output;
 * channels and sample_rate describe the audio subsystem's format.
 */
static void apply_audio_volume(obs_source_t *source, uint32_t mixers,
		size_t channels, size_t sample_rate)
{
	struct audio_action action;
	bool actions_pending;
	float vol;

	/* Peek at the first queued action under the lock, copying it out
	 * so the lock is not held across the work below. */
	pthread_mutex_lock(&source->audio_actions_mutex);

	actions_pending = source->audio_actions.num > 0;
	if (actions_pending)
		action = source->audio_actions.array[0];

	pthread_mutex_unlock(&source->audio_actions_mutex);

	if (actions_pending) {
		/* duration of one tick (AUDIO_OUTPUT_FRAMES) in ns */
		uint64_t duration =
			conv_frames_to_time(sample_rate, AUDIO_OUTPUT_FRAMES);

		/* If the action lands inside this tick, the ramp path in
		 * apply_audio_actions handles all volume application. */
		if (action.timestamp < (source->audio_ts + duration)) {
			apply_audio_actions(source, channels, sample_rate);
			return;
		}
	}

	vol = get_source_volume(source, source->audio_ts);
	/* unity gain: buffers are already correct */
	if (vol == 1.0f)
		return;

	if (vol == 0.0f || mixers == 0) {
		/* Silence: zero AUDIO_OUTPUT_FRAMES * MAX_AUDIO_CHANNELS *
		 * MAX_AUDIO_MIXES floats starting at the first plane —
		 * i.e. the memset treats all mix/channel planes as one
		 * contiguous block. */
		memset(source->audio_output_buf[0][0], 0,
				AUDIO_OUTPUT_FRAMES * sizeof(float) *
				MAX_AUDIO_CHANNELS * MAX_AUDIO_MIXES);
		return;
	}

	/* scale only mixes that are both assigned to this source and
	 * requested by the caller */
	for (size_t mix = 0; mix < MAX_AUDIO_MIXES; mix++) {
		uint32_t mix_and_val = (1 << mix);
		if ((source->audio_mixers & mix_and_val) != 0 &&
				(mixers & mix_and_val) != 0)
			multiply_output_audio(source, mix, channels, vol);
	}
}
|
|
|
|
|
|
|
|
/* Renders audio for a source that provides its own audio_render callback
 * (per the audio-subsystem design notes, composite sources such as scenes
 * use this to mix/process child audio — NOTE(review): confirm callers).
 *
 * The callback writes directly into source->audio_output_buf; afterwards
 * any caller-requested mix the source has disabled is cleared so stale
 * samples are not mixed downstream, and volume is applied. */
static void custom_audio_render(obs_source_t *source, uint32_t mixers,
				size_t channels, size_t sample_rate)
{
	struct obs_source_audio_mix audio_data;
	bool success;
	uint64_t ts;

	for (size_t mix = 0; mix < MAX_AUDIO_MIXES; mix++) {
		/* Point each mix/channel slot at the source's output buffers
		 * so the callback renders in place. */
		for (size_t ch = 0; ch < channels; ch++) {
			audio_data.output[mix].data[ch] =
				source->audio_output_buf[mix][ch];
		}

		/* Zero only mixes active for BOTH the source and the caller,
		 * so the callback can accumulate into clean buffers.  The
		 * single memset of plane 0 covers all channel planes
		 * (AUDIO_OUTPUT_FRAMES * channels floats — planes are
		 * contiguous in this allocation). */
		if ((source->audio_mixers & mixers & (1 << mix)) != 0) {
			memset(source->audio_output_buf[mix][0], 0,
			       sizeof(float) * AUDIO_OUTPUT_FRAMES * channels);
		}
	}

	success = source->info.audio_render(source->context.data, &ts,
					    &audio_data, mixers, channels,
					    sample_rate);
	/* A failed render invalidates the timestamp and marks audio as
	 * pending for this tick. */
	source->audio_ts = success ? ts : 0;
	source->audio_pending = !success;

	if (!success || !source->audio_ts || !mixers)
		return;

	/* Clear mixes the caller requested but the source has disabled. */
	for (size_t mix = 0; mix < MAX_AUDIO_MIXES; mix++) {
		uint32_t mix_bit = 1 << mix;

		if ((mixers & mix_bit) == 0)
			continue;

		if ((source->audio_mixers & mix_bit) == 0) {
			memset(source->audio_output_buf[mix][0], 0,
			       sizeof(float) * AUDIO_OUTPUT_FRAMES * channels);
		}
	}

	apply_audio_volume(source, mixers, channels, sample_rate);
}
|
|
|
|
|
2019-08-31 01:12:58 -07:00
|
|
|
/* Runs the source's audio_mix callback on the submix buffers and, on
 * success, feeds the mixed planar-float result back through the normal
 * audio-output path via obs_source_output_audio(). */
static void audio_submix(obs_source_t *source, size_t channels,
			 size_t sample_rate)
{
	struct obs_source_audio out = {0};
	struct audio_output_data mix;
	uint64_t timestamp;

	/* Hand the source's per-channel submix buffers to the callback. */
	for (size_t ch = 0; ch < channels; ch++)
		mix.data[ch] = source->audio_mix_buf[ch];

	/* Clear all channel planes (contiguous) before mixing. */
	memset(source->audio_mix_buf[0], 0,
	       sizeof(float) * AUDIO_OUTPUT_FRAMES * channels);

	if (!source->info.audio_mix(source->context.data, &timestamp, &mix,
				    channels, sample_rate))
		return;

	for (size_t ch = 0; ch < channels; ch++)
		out.data[ch] = (const uint8_t *)mix.data[ch];

	out.samples_per_sec = (uint32_t)sample_rate;
	out.frames = AUDIO_OUTPUT_FRAMES;
	out.format = AUDIO_FORMAT_FLOAT_PLANAR;
	out.speakers = (enum speaker_layout)channels;
	out.timestamp = timestamp;

	obs_source_output_audio(source, &out);
}
|
|
|
|
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
/* Per-tick audio processing for a buffered (non-custom-render) source.
 *
 * Peeks `size` bytes per channel from the front of the source's input
 * circular buffers into output mix 0, then fans mix 0 out to every other
 * active mix (or, for submix sources, collapses everything to mix 0).
 * Inactive mixes are zeroed so stale data is never mixed downstream.
 *
 * source:      source being ticked (holds audio_buf_mutex internally)
 * mixers:      caller's active-mix bitmask
 * channels:    audio channel count
 * sample_rate: output sample rate, forwarded to apply_audio_volume
 * size:        bytes per channel to consume this tick
 */
static inline void process_audio_source_tick(obs_source_t *source,
					     uint32_t mixers, size_t channels,
					     size_t sample_rate, size_t size)
{
	/* Fix: renamed from "audio_submix", which shadowed the
	 * audio_submix() function defined above (-Wshadow). */
	bool is_submix = !!(source->info.output_flags & OBS_SOURCE_SUBMIX);

	pthread_mutex_lock(&source->audio_buf_mutex);

	/* Not enough buffered audio yet; flag pending and bail. */
	if (source->audio_input_buf[0].size < size) {
		source->audio_pending = true;
		pthread_mutex_unlock(&source->audio_buf_mutex);
		return;
	}

	/* Peek (do not pop) this tick's samples into mix 0. */
	for (size_t ch = 0; ch < channels; ch++)
		circlebuf_peek_front(&source->audio_input_buf[ch],
				     source->audio_output_buf[0][ch], size);

	pthread_mutex_unlock(&source->audio_buf_mutex);

	for (size_t mix = 1; mix < MAX_AUDIO_MIXES; mix++) {
		uint32_t mix_and_val = (1 << mix);

		/* Submix sources only use mix 0; treat bit 0 as the sole
		 * active mix and stop after clearing/copying mix 1. */
		if (is_submix) {
			if (mix > 1)
				break;

			mixers = 1;
			mix_and_val = 1;
		}

		/* Zero mixes that are inactive on either side. */
		if ((source->audio_mixers & mix_and_val) == 0 ||
		    (mixers & mix_and_val) == 0) {
			memset(source->audio_output_buf[mix][0], 0,
			       size * channels);
			continue;
		}

		/* Active mix: duplicate mix 0's data. */
		for (size_t ch = 0; ch < channels; ch++)
			memcpy(source->audio_output_buf[mix][ch],
			       source->audio_output_buf[0][ch], size);
	}

	if (is_submix) {
		source->audio_pending = false;
		return;
	}

	/* Mix 0 itself is cleared if inactive on either side. */
	if ((source->audio_mixers & 1) == 0 || (mixers & 1) == 0)
		memset(source->audio_output_buf[0][0], 0, size * channels);

	apply_audio_volume(source, mixers, channels, sample_rate);
	source->audio_pending = false;
}
|
|
|
|
|
|
|
|
/* Renders one tick of audio for a source, routing to the appropriate
 * rendering path based on the source's capabilities.
 *
 * @param source       Source to render audio for
 * @param mixers       Bitmask of mixer tracks to render to
 * @param channels     Number of audio channels
 * @param sample_rate  Audio sample rate
 * @param size         Size (in bytes) of one channel's data for this tick
 *
 * NOTE(review): presumably called once per audio tick from the audio
 * thread — confirm against the callers of this function. */
void obs_source_audio_render(obs_source_t *source, uint32_t mixers,
		size_t channels, size_t sample_rate, size_t size)
{
	/* No output buffers allocated: nothing can be rendered, so mark
	 * the source as having no audio ready this tick. */
	if (!source->audio_output_buf[0][0]) {
		source->audio_pending = true;
		return;
	}

	/* Composite sources (those providing an audio_render callback)
	 * produce their audio entirely through that callback. */
	if (source->info.audio_render) {
		custom_audio_render(source, mixers, channels, sample_rate);
		return;
	}

	/* Sources with an audio_mix callback get submixed first; note the
	 * code then falls through to the normal tick processing below. */
	if (source->info.audio_mix) {
		audio_submix(source, channels, sample_rate);
	}

	/* No timestamp yet means no audio data has arrived; nothing to
	 * process this tick. */
	if (!source->audio_ts) {
		source->audio_pending = true;
		return;
	}

	process_audio_source_tick(source, mixers, channels, sample_rate, size);
}
|
|
|
|
|
|
|
|
bool obs_source_audio_pending(const obs_source_t *source)
|
|
|
|
{
|
2016-01-29 23:32:29 -08:00
|
|
|
if (!obs_source_valid(source, "obs_source_audio_pending"))
|
|
|
|
return true;
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
return (is_composite_source(source) || is_audio_source(source))
|
|
|
|
? source->audio_pending
|
|
|
|
: true;
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- First Primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flow to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
2015-12-20 03:06:35 -08:00
|
|
|
}
|
|
|
|
|
2015-12-17 04:28:35 -08:00
|
|
|
uint64_t obs_source_get_audio_timestamp(const obs_source_t *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_audio_timestamp")
|
|
|
|
? source->audio_ts
|
|
|
|
: 0;
|
2015-12-17 04:28:35 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
void obs_source_get_audio_mix(const obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
struct obs_source_audio_mix *audio)
|
2015-12-17 04:28:35 -08:00
|
|
|
{
|
|
|
|
if (!obs_source_valid(source, "obs_source_get_audio_mix"))
|
|
|
|
return;
|
|
|
|
if (!obs_ptr_valid(audio, "audio"))
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (size_t mix = 0; mix < MAX_AUDIO_MIXES; mix++) {
|
|
|
|
for (size_t ch = 0; ch < MAX_AUDIO_CHANNELS; ch++) {
|
|
|
|
audio->output[mix].data[ch] =
|
|
|
|
source->audio_output_buf[mix][ch];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-01-07 19:48:36 -08:00
|
|
|
|
|
|
|
void obs_source_add_audio_capture_callback(obs_source_t *source,
|
2019-06-22 22:13:45 -07:00
|
|
|
obs_source_audio_capture_t callback,
|
|
|
|
void *param)
|
2016-01-07 19:48:36 -08:00
|
|
|
{
|
|
|
|
struct audio_cb_info info = {callback, param};
|
|
|
|
|
|
|
|
if (!obs_source_valid(source, "obs_source_add_audio_capture_callback"))
|
|
|
|
return;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&source->audio_cb_mutex);
|
|
|
|
da_push_back(source->audio_cb_list, &info);
|
|
|
|
pthread_mutex_unlock(&source->audio_cb_mutex);
|
|
|
|
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
void obs_source_remove_audio_capture_callback(
|
|
|
|
obs_source_t *source, obs_source_audio_capture_t callback, void *param)
|
2016-01-07 19:48:36 -08:00
|
|
|
{
|
|
|
|
struct audio_cb_info info = {callback, param};
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
if (!obs_source_valid(source,
|
|
|
|
"obs_source_remove_audio_capture_callback"))
|
2016-01-07 19:48:36 -08:00
|
|
|
return;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&source->audio_cb_mutex);
|
|
|
|
da_erase_item(source->audio_cb_list, &info);
|
|
|
|
pthread_mutex_unlock(&source->audio_cb_mutex);
|
|
|
|
}
|
2017-02-05 21:37:35 -08:00
|
|
|
|
|
|
|
/* Sets the audio monitoring type for a source, creating or destroying
 * the audio monitor as monitoring toggles on or off. */
void obs_source_set_monitoring_type(obs_source_t *source,
		enum obs_monitoring_type type)
{
	if (!obs_source_valid(source, "obs_source_set_monitoring_type"))
		return;
	if (source->monitoring_type == type)
		return;

	bool was_on = source->monitoring_type != OBS_MONITORING_TYPE_NONE;
	bool now_on = type != OBS_MONITORING_TYPE_NONE;

	/* only (de)allocate the monitor when crossing the on/off boundary */
	if (now_on && !was_on) {
		source->monitor = audio_monitor_create(source);
	} else if (was_on && !now_on) {
		audio_monitor_destroy(source->monitor);
		source->monitor = NULL;
	}

	source->monitoring_type = type;
}
|
|
|
|
|
2019-06-22 22:13:45 -07:00
|
|
|
enum obs_monitoring_type
|
|
|
|
obs_source_get_monitoring_type(const obs_source_t *source)
|
2017-02-05 21:37:35 -08:00
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_monitoring_type")
|
|
|
|
? source->monitoring_type
|
|
|
|
: OBS_MONITORING_TYPE_NONE;
|
2017-02-05 21:37:35 -08:00
|
|
|
}
|
2017-05-13 23:32:40 -07:00
|
|
|
|
|
|
|
/* Enables or disables unbuffered mode for async video sources. */
void obs_source_set_async_unbuffered(obs_source_t *source, bool unbuffered)
{
	if (obs_source_valid(source, "obs_source_set_async_unbuffered"))
		source->async_unbuffered = unbuffered;
}
|
|
|
|
|
|
|
|
bool obs_source_async_unbuffered(const obs_source_t *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_async_unbuffered")
|
|
|
|
? source->async_unbuffered
|
|
|
|
: false;
|
2017-05-13 23:32:40 -07:00
|
|
|
}
|
2017-09-13 03:12:56 -07:00
|
|
|
|
|
|
|
/* Returns the source's private settings with an added reference; the
 * caller owns the reference and must release it.  Returns NULL for an
 * invalid source. */
obs_data_t *obs_source_get_private_settings(obs_source_t *source)
{
	if (!obs_ptr_valid(source, "obs_source_get_private_settings"))
		return NULL;

	obs_data_t *settings = source->private_settings;
	obs_data_addref(settings);
	return settings;
}
|
2017-09-29 00:37:33 -07:00
|
|
|
|
|
|
|
/* Enables or disables decoupled audio for an async source.  When
 * enabling, timing is reset under the audio buffer mutex so it gets
 * re-established from scratch. */
void obs_source_set_async_decoupled(obs_source_t *source, bool decouple)
{
	if (!obs_ptr_valid(source, "obs_source_set_async_decoupled"))
		return;

	source->async_decoupled = decouple;
	if (!decouple)
		return;

	pthread_mutex_lock(&source->audio_buf_mutex);
	source->timing_set = false;
	reset_audio_data(source, 0);
	pthread_mutex_unlock(&source->audio_buf_mutex);
}
|
|
|
|
|
|
|
|
bool obs_source_async_decoupled(const obs_source_t *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_async_decoupled")
|
|
|
|
? source->async_decoupled
|
|
|
|
: false;
|
2017-09-29 00:37:33 -07:00
|
|
|
}
|
2017-12-25 12:20:54 -08:00
|
|
|
|
|
|
|
/* hidden/undocumented export to allow source type redefinition for scripts */
|
|
|
|
EXPORT void obs_enable_source_type(const char *name, bool enable)
|
|
|
|
{
|
|
|
|
struct obs_source_info *info = get_source_info(name);
|
|
|
|
if (!info)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (enable)
|
|
|
|
info->output_flags &= ~OBS_SOURCE_CAP_DISABLED;
|
|
|
|
else
|
|
|
|
info->output_flags |= OBS_SOURCE_CAP_DISABLED;
|
|
|
|
}
|
2017-10-08 03:15:28 -07:00
|
|
|
|
|
|
|
/* Returns the speaker layout (channel configuration) of the source's
 * audio, or SPEAKERS_UNKNOWN if the source is invalid.
 *
 * Fix: the validity check previously logged the wrong function name
 * ("obs_source_get_audio_channels", a copy/paste leftover), producing
 * a misleading warning for callers debugging invalid-source errors. */
enum speaker_layout obs_source_get_speaker_layout(obs_source_t *source)
{
	if (!obs_source_valid(source, "obs_source_get_speaker_layout"))
		return SPEAKERS_UNKNOWN;

	return source->sample_info.speakers;
}
|
|
|
|
|
|
|
|
/* Sets the stereo balance value for the source. */
void obs_source_set_balance_value(obs_source_t *source, float balance)
{
	if (obs_source_valid(source, "obs_source_set_balance_value"))
		source->balance = balance;
}
|
|
|
|
|
|
|
|
float obs_source_get_balance_value(const obs_source_t *source)
|
|
|
|
{
|
2019-06-22 22:13:45 -07:00
|
|
|
return obs_source_valid(source, "obs_source_get_balance_value")
|
|
|
|
? source->balance
|
|
|
|
: 0.5f;
|
2017-10-08 03:15:28 -07:00
|
|
|
}
|
2019-09-19 23:37:29 -07:00
|
|
|
|
|
|
|
/* Sets whether the source's audio is active, emitting the matching
 * activate/deactivate signals only when the state actually changes. */
void obs_source_set_audio_active(obs_source_t *source, bool active)
{
	if (!obs_source_valid(source, "obs_source_set_audio_active"))
		return;

	/* os_atomic_set_bool returns the previous value; no change means
	 * no signal */
	if (os_atomic_set_bool(&source->audio_active, active) == active)
		return;

	const char *global_sig = active ? "source_audio_activate"
					: "source_audio_deactivate";
	const char *source_sig = active ? "audio_activate"
					: "audio_deactivate";
	obs_source_dosignal(source, global_sig, source_sig);
}
|
|
|
|
|
|
|
|
bool obs_source_audio_active(const obs_source_t *source)
|
|
|
|
{
|
|
|
|
return obs_source_valid(source, "obs_source_audio_active")
|
|
|
|
? os_atomic_load_bool(&source->audio_active)
|
|
|
|
: false;
|
|
|
|
}
|
2019-09-20 00:13:51 -07:00
|
|
|
|
|
|
|
uint32_t obs_source_get_last_obs_version(const obs_source_t *source)
|
|
|
|
{
|
|
|
|
return obs_source_valid(source, "obs_source_get_last_obs_version")
|
|
|
|
? source->last_obs_ver
|
|
|
|
: 0;
|
|
|
|
}
|
2019-07-27 21:59:16 -07:00
|
|
|
|
|
|
|
enum obs_icon_type obs_source_get_icon_type(const char *id)
|
|
|
|
{
|
|
|
|
const struct obs_source_info *info = get_source_info(id);
|
|
|
|
return (info) ? info->icon_type : OBS_ICON_TYPE_UNKNOWN;
|
|
|
|
}
|