/******************************************************************************
    Copyright (C) 2013-2014 by Hugh Bailey <obs.jim@gmail.com>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/

#include <inttypes.h>

#include "graphics/matrix4.h"
#include "callback/calldata.h"

#include "obs.h"
#include "obs-internal.h"

struct obs_core *obs = NULL;

extern void add_default_module_paths(void);
extern char *find_libobs_data_file(const char *file);

static inline void make_video_info(struct video_output_info *vi,
		struct obs_video_info *ovi)
{
	vi->name       = "video";
	vi->format     = ovi->output_format;
	vi->fps_num    = ovi->fps_num;
	vi->fps_den    = ovi->fps_den;
	vi->width      = ovi->output_width;
	vi->height     = ovi->output_height;
	vi->range      = ovi->range;
	vi->colorspace = ovi->colorspace;
	vi->cache_size = 6;
}

#define PIXEL_SIZE 4

#define GET_ALIGN(val, align) \
	(((val) + (align-1)) & ~(align-1))

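/*
 * Note (added for clarity): GET_ALIGN rounds `val` up to the next multiple
 * of `align` (a power of two), e.g. GET_ALIGN(13, 4) == 16 and
 * GET_ALIGN(16, 4) == 16.
 *
 * The set_*_sizes helpers below pack a planar output image into a single
 * RGBA (PIXEL_SIZE == 4 bytes per texel) render target that is
 * output_width texels wide.  For a hypothetical 1280x720 I420 output:
 *
 *     plane_offsets     = { 0, 921600, 1152000 }   (Y, U, V byte offsets)
 *     chroma_pixels     = 230400                   (921600 / 4)
 *     total_bytes       = 1382400                  (1.5 bytes per pixel)
 *     conversion_height = 1382400 / 4 / 1280 = 270 rows of RGBA texels
 */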
static inline void set_420p_sizes(const struct obs_video_info *ovi)
{
	struct obs_core_video *video = &obs->video;
	uint32_t chroma_pixels;
	uint32_t total_bytes;

	chroma_pixels = (ovi->output_width * ovi->output_height / 4);
	chroma_pixels = GET_ALIGN(chroma_pixels, PIXEL_SIZE);

	video->plane_offsets[0] = 0;
	video->plane_offsets[1] = ovi->output_width * ovi->output_height;
	video->plane_offsets[2] = video->plane_offsets[1] + chroma_pixels;

	video->plane_linewidth[0] = ovi->output_width;
	video->plane_linewidth[1] = ovi->output_width/2;
	video->plane_linewidth[2] = ovi->output_width/2;

	video->plane_sizes[0] = video->plane_offsets[1];
	video->plane_sizes[1] = video->plane_sizes[0]/4;
	video->plane_sizes[2] = video->plane_sizes[1];

	total_bytes = video->plane_offsets[2] + chroma_pixels;

	video->conversion_height =
		(total_bytes/PIXEL_SIZE + ovi->output_width-1) /
		ovi->output_width;

	video->conversion_height = GET_ALIGN(video->conversion_height, 2);
	video->conversion_tech = "Planar420";
}

static inline void set_nv12_sizes(const struct obs_video_info *ovi)
{
	struct obs_core_video *video = &obs->video;
	uint32_t chroma_pixels;
	uint32_t total_bytes;

	chroma_pixels = (ovi->output_width * ovi->output_height / 2);
	chroma_pixels = GET_ALIGN(chroma_pixels, PIXEL_SIZE);

	video->plane_offsets[0] = 0;
	video->plane_offsets[1] = ovi->output_width * ovi->output_height;

	video->plane_linewidth[0] = ovi->output_width;
	video->plane_linewidth[1] = ovi->output_width;

	video->plane_sizes[0] = video->plane_offsets[1];
	video->plane_sizes[1] = video->plane_sizes[0]/2;

	total_bytes = video->plane_offsets[1] + chroma_pixels;

	video->conversion_height =
		(total_bytes/PIXEL_SIZE + ovi->output_width-1) /
		ovi->output_width;

	video->conversion_height = GET_ALIGN(video->conversion_height, 2);
	video->conversion_tech = "NV12";
}

static inline void set_444p_sizes(const struct obs_video_info *ovi)
{
	struct obs_core_video *video = &obs->video;
	uint32_t chroma_pixels;
	uint32_t total_bytes;

	chroma_pixels = (ovi->output_width * ovi->output_height);
	chroma_pixels = GET_ALIGN(chroma_pixels, PIXEL_SIZE);

	video->plane_offsets[0] = 0;
	video->plane_offsets[1] = chroma_pixels;
	video->plane_offsets[2] = chroma_pixels + chroma_pixels;

	video->plane_linewidth[0] = ovi->output_width;
	video->plane_linewidth[1] = ovi->output_width;
	video->plane_linewidth[2] = ovi->output_width;

	video->plane_sizes[0] = chroma_pixels;
	video->plane_sizes[1] = chroma_pixels;
	video->plane_sizes[2] = chroma_pixels;

	total_bytes = video->plane_offsets[2] + chroma_pixels;

	video->conversion_height =
		(total_bytes/PIXEL_SIZE + ovi->output_width-1) /
		ovi->output_width;

	video->conversion_height = GET_ALIGN(video->conversion_height, 2);
	video->conversion_tech = "Planar444";
}

static inline void calc_gpu_conversion_sizes(const struct obs_video_info *ovi)
{
	obs->video.conversion_height = 0;
	memset(obs->video.plane_offsets, 0, sizeof(obs->video.plane_offsets));
	memset(obs->video.plane_sizes, 0, sizeof(obs->video.plane_sizes));
	memset(obs->video.plane_linewidth, 0,
			sizeof(obs->video.plane_linewidth));

	switch ((uint32_t)ovi->output_format) {
	case VIDEO_FORMAT_I420:
		set_420p_sizes(ovi);
		break;
	case VIDEO_FORMAT_NV12:
		set_nv12_sizes(ovi);
		break;
	case VIDEO_FORMAT_I444:
		set_444p_sizes(ovi);
		break;
	}
}

static bool obs_init_gpu_conversion(struct obs_video_info *ovi)
{
	struct obs_core_video *video = &obs->video;

	calc_gpu_conversion_sizes(ovi);

	if (!video->conversion_height) {
		blog(LOG_INFO, "GPU conversion not available for format: %u",
				(unsigned int)ovi->output_format);
		video->gpu_conversion = false;
		return true;
	}

	for (size_t i = 0; i < NUM_TEXTURES; i++) {
		video->convert_textures[i] = gs_texture_create(
				ovi->output_width, video->conversion_height,
				GS_RGBA, 1, NULL, GS_RENDER_TARGET);

		if (!video->convert_textures[i])
			return false;
	}

	return true;
}

static bool obs_init_textures(struct obs_video_info *ovi)
{
	struct obs_core_video *video = &obs->video;
	uint32_t output_height = video->gpu_conversion ?
		video->conversion_height : ovi->output_height;
	size_t i;

	for (i = 0; i < NUM_TEXTURES; i++) {
		video->copy_surfaces[i] = gs_stagesurface_create(
				ovi->output_width, output_height, GS_RGBA);

		if (!video->copy_surfaces[i])
			return false;

		video->render_textures[i] = gs_texture_create(
				ovi->base_width, ovi->base_height,
				GS_RGBA, 1, NULL, GS_RENDER_TARGET);

		if (!video->render_textures[i])
			return false;

		video->output_textures[i] = gs_texture_create(
				ovi->output_width, ovi->output_height,
				GS_RGBA, 1, NULL, GS_RENDER_TARGET);

		if (!video->output_textures[i])
			return false;
	}

	return true;
}

gs_effect_t *obs_load_effect(gs_effect_t **effect, const char *file)
{
	if (!*effect) {
		char *filename = find_libobs_data_file(file);
		*effect = gs_effect_create_from_file(filename, NULL);
		bfree(filename);
	}

	return *effect;
}

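/*
 * Usage sketch (illustrative, not from the original file): callers pass
 * the address of a cached effect pointer so the effect is only loaded from
 * the libobs data directory on first use, e.g.:
 *
 *     gs_effect_t *solid = obs_load_effect(&obs->video.solid_effect,
 *                                          "solid.effect");
 *
 * Because the name is resolved with find_libobs_data_file(), only effect
 * files shipped with libobs can be loaded this way.
 */
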
static int obs_init_graphics(struct obs_video_info *ovi)
{
	struct obs_core_video *video = &obs->video;
	uint8_t transparent_tex_data[2*2*4] = {0};
	const uint8_t *transparent_tex = transparent_tex_data;
	bool success = true;
	int errorcode;

	errorcode = gs_create(&video->graphics, ovi->graphics_module,
			ovi->adapter);
	if (errorcode != GS_SUCCESS) {
		switch (errorcode) {
		case GS_ERROR_MODULE_NOT_FOUND:
			return OBS_VIDEO_MODULE_NOT_FOUND;
		case GS_ERROR_NOT_SUPPORTED:
			return OBS_VIDEO_NOT_SUPPORTED;
		default:
			return OBS_VIDEO_FAIL;
		}
	}

	gs_enter_context(video->graphics);

	char *filename = find_libobs_data_file("default.effect");
	video->default_effect = gs_effect_create_from_file(filename,
			NULL);
	bfree(filename);

	if (gs_get_device_type() == GS_DEVICE_OPENGL) {
		filename = find_libobs_data_file("default_rect.effect");
		video->default_rect_effect = gs_effect_create_from_file(
				filename, NULL);
		bfree(filename);
	}

	filename = find_libobs_data_file("opaque.effect");
	video->opaque_effect = gs_effect_create_from_file(filename,
			NULL);
	bfree(filename);

	filename = find_libobs_data_file("solid.effect");
	video->solid_effect = gs_effect_create_from_file(filename,
			NULL);
	bfree(filename);

	filename = find_libobs_data_file("format_conversion.effect");
	video->conversion_effect = gs_effect_create_from_file(filename,
			NULL);
	bfree(filename);

	filename = find_libobs_data_file("bicubic_scale.effect");
	video->bicubic_effect = gs_effect_create_from_file(filename,
			NULL);
	bfree(filename);

	filename = find_libobs_data_file("lanczos_scale.effect");
	video->lanczos_effect = gs_effect_create_from_file(filename,
			NULL);
	bfree(filename);

	filename = find_libobs_data_file("bilinear_lowres_scale.effect");
	video->bilinear_lowres_effect = gs_effect_create_from_file(filename,
			NULL);
	bfree(filename);

	filename = find_libobs_data_file("premultiplied_alpha.effect");
	video->premultiplied_alpha_effect = gs_effect_create_from_file(filename,
			NULL);
	bfree(filename);

	obs->video.transparent_texture = gs_texture_create(2, 2, GS_RGBA, 1,
			&transparent_tex, 0);

	if (!video->default_effect)
		success = false;
	if (gs_get_device_type() == GS_DEVICE_OPENGL) {
		if (!video->default_rect_effect)
			success = false;
	}
	if (!video->opaque_effect)
		success = false;
	if (!video->solid_effect)
		success = false;
	if (!video->conversion_effect)
		success = false;
	if (!video->premultiplied_alpha_effect)
		success = false;
	if (!video->transparent_texture)
		success = false;

	gs_leave_context();
	return success ? OBS_VIDEO_SUCCESS : OBS_VIDEO_FAIL;
}

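/*
 * Note (added for clarity): for YUV output formats, set_video_matrix
 * inverts the decode matrix returned by video_format_get_parameters() to
 * get an RGB-to-YUV matrix for the configured colorspace/range, swaps the
 * first two rows, and stores the result in video->color_matrix, presumably
 * for use by the GPU format conversion effect.  RGB formats get an
 * identity matrix instead.
 */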
static inline void set_video_matrix(struct obs_core_video *video,
		struct obs_video_info *ovi)
{
	struct matrix4 mat;
	struct vec4 r_row;

	if (format_is_yuv(ovi->output_format)) {
		video_format_get_parameters(ovi->colorspace, ovi->range,
				(float*)&mat, NULL, NULL);
		matrix4_inv(&mat, &mat);

		/* swap R and G */
		r_row = mat.x;
		mat.x = mat.y;
		mat.y = r_row;
	} else {
		matrix4_identity(&mat);
	}

	memcpy(video->color_matrix, &mat, sizeof(float) * 16);
}

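/*
 * Note (added for clarity): obs_init_video fills a video_output_info from
 * the caller's obs_video_info, opens the raw video output, creates the GPU
 * conversion/copy textures, and finally spawns obs_video_thread, which
 * drives rendering from that point on.
 */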
static int obs_init_video(struct obs_video_info *ovi)
{
	struct obs_core_video *video = &obs->video;
	struct video_output_info vi;
	int errorcode;

	make_video_info(&vi, ovi);
	video->base_width     = ovi->base_width;
	video->base_height    = ovi->base_height;
	video->output_width   = ovi->output_width;
	video->output_height  = ovi->output_height;
	video->gpu_conversion = ovi->gpu_conversion;
	video->scale_type     = ovi->scale_type;

	set_video_matrix(video, ovi);

	errorcode = video_output_open(&video->video, &vi);

	if (errorcode != VIDEO_OUTPUT_SUCCESS) {
		if (errorcode == VIDEO_OUTPUT_INVALIDPARAM) {
			blog(LOG_ERROR, "Invalid video parameters specified");
			return OBS_VIDEO_INVALID_PARAM;
		} else {
			blog(LOG_ERROR, "Could not open video output");
		}
		return OBS_VIDEO_FAIL;
	}

	gs_enter_context(video->graphics);

	if (ovi->gpu_conversion && !obs_init_gpu_conversion(ovi))
		return OBS_VIDEO_FAIL;
	if (!obs_init_textures(ovi))
		return OBS_VIDEO_FAIL;

	gs_leave_context();

	errorcode = pthread_create(&video->video_thread, NULL,
			obs_video_thread, obs);
	if (errorcode != 0)
		return OBS_VIDEO_FAIL;

	video->thread_initialized = true;
	return OBS_VIDEO_SUCCESS;
}

static void stop_video(void)
{
	struct obs_core_video *video = &obs->video;
	void *thread_retval;

	if (video->video) {
		video_output_stop(video->video);
		if (video->thread_initialized) {
			pthread_join(video->video_thread, &thread_retval);
			video->thread_initialized = false;
		}
	}
}

static void obs_free_video(void)
{
	struct obs_core_video *video = &obs->video;

	if (video->video) {
		video_output_close(video->video);
		video->video = NULL;

		if (!video->graphics)
			return;

		gs_enter_context(video->graphics);

		if (video->mapped_surface) {
			gs_stagesurface_unmap(video->mapped_surface);
			video->mapped_surface = NULL;
		}

		for (size_t i = 0; i < NUM_TEXTURES; i++) {
			gs_stagesurface_destroy(video->copy_surfaces[i]);
			gs_texture_destroy(video->render_textures[i]);
			gs_texture_destroy(video->convert_textures[i]);
			gs_texture_destroy(video->output_textures[i]);

			video->copy_surfaces[i]    = NULL;
			video->render_textures[i]  = NULL;
			video->convert_textures[i] = NULL;
			video->output_textures[i]  = NULL;
		}

		gs_leave_context();

		circlebuf_free(&video->vframe_info_buffer);

		memset(&video->textures_rendered, 0,
				sizeof(video->textures_rendered));
		memset(&video->textures_output, 0,
				sizeof(video->textures_output));
		memset(&video->textures_copied, 0,
				sizeof(video->textures_copied));
		memset(&video->textures_converted, 0,
				sizeof(video->textures_converted));

		video->cur_texture = 0;
	}
}

static void obs_free_graphics(void)
{
	struct obs_core_video *video = &obs->video;

	if (video->graphics) {
		gs_enter_context(video->graphics);

		gs_texture_destroy(video->transparent_texture);

		gs_effect_destroy(video->default_effect);
		gs_effect_destroy(video->default_rect_effect);
		gs_effect_destroy(video->opaque_effect);
		gs_effect_destroy(video->solid_effect);
		gs_effect_destroy(video->conversion_effect);
		gs_effect_destroy(video->bicubic_effect);
		gs_effect_destroy(video->lanczos_effect);
		gs_effect_destroy(video->bilinear_lowres_effect);
		video->default_effect = NULL;

		gs_leave_context();

		gs_destroy(video->graphics);
		video->graphics = NULL;
	}
}

static bool obs_init_audio(struct audio_output_info *ai)
{
	struct obs_core_audio *audio = &obs->audio;
	int errorcode;

	/* TODO: sound subsystem */

	audio->user_volume = 1.0f;

	errorcode = audio_output_open(&audio->audio, ai);
	if (errorcode == AUDIO_OUTPUT_SUCCESS)
		return true;
	else if (errorcode == AUDIO_OUTPUT_INVALIDPARAM)
		blog(LOG_ERROR, "Invalid audio parameters specified");
	else
		blog(LOG_ERROR, "Could not open audio output");

	return false;
}

static void obs_free_audio(void)
{
	struct obs_core_audio *audio = &obs->audio;
	if (audio->audio)
		audio_output_close(audio->audio);

	circlebuf_free(&audio->buffered_timestamps);
	da_free(audio->render_order);
	da_free(audio->root_nodes);

	memset(audio, 0, sizeof(struct obs_core_audio));
}

static bool obs_init_data(void)
{
	struct obs_core_data *data = &obs->data;
	pthread_mutexattr_t attr;

	assert(data != NULL);

	pthread_mutex_init_value(&obs->data.displays_mutex);

	if (pthread_mutexattr_init(&attr) != 0)
		return false;
	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) != 0)
		goto fail;
	if (pthread_mutex_init(&data->sources_mutex, &attr) != 0)
		goto fail;
	if (pthread_mutex_init(&data->audio_sources_mutex, &attr) != 0)
		goto fail;
	if (pthread_mutex_init(&data->displays_mutex, &attr) != 0)
		goto fail;
	if (pthread_mutex_init(&data->outputs_mutex, &attr) != 0)
		goto fail;
	if (pthread_mutex_init(&data->encoders_mutex, &attr) != 0)
		goto fail;
	if (pthread_mutex_init(&data->services_mutex, &attr) != 0)
		goto fail;
	if (!obs_view_init(&data->main_view))
		goto fail;

	data->valid = true;

fail:
	pthread_mutexattr_destroy(&attr);
	return data->valid;
}

void obs_main_view_free(struct obs_view *view)
{
	if (!view) return;

	for (size_t i = 0; i < MAX_CHANNELS; i++)
		obs_source_release(view->channels[i]);

	memset(view->channels, 0, sizeof(view->channels));
	pthread_mutex_destroy(&view->channels_mutex);
}

#define FREE_OBS_LINKED_LIST(type) \
	do { \
		int unfreed = 0; \
		while (data->first_ ## type ) { \
			obs_ ## type ## _destroy(data->first_ ## type ); \
			unfreed++; \
		} \
		if (unfreed) \
			blog(LOG_INFO, "\t%d " #type "(s) were remaining", \
					unfreed); \
	} while (false)

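/*
 * Illustrative expansion (added for clarity): FREE_OBS_LINKED_LIST(source)
 * expands, roughly, to
 *
 *     do {
 *         int unfreed = 0;
 *         while (data->first_source) {
 *             obs_source_destroy(data->first_source);
 *             unfreed++;
 *         }
 *         if (unfreed)
 *             blog(LOG_INFO, "\t%d source(s) were remaining", unfreed);
 *     } while (false)
 *
 * i.e. destroying a leaked object is expected to unlink it from the
 * corresponding first_* list, otherwise the loop would not terminate.
 */
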
static void obs_free_data(void)
{
	struct obs_core_data *data = &obs->data;

	data->valid = false;

	obs_main_view_free(&data->main_view);

	blog(LOG_INFO, "Freeing OBS context data");

	FREE_OBS_LINKED_LIST(source);
	FREE_OBS_LINKED_LIST(output);
	FREE_OBS_LINKED_LIST(encoder);
	FREE_OBS_LINKED_LIST(display);
	FREE_OBS_LINKED_LIST(service);

	pthread_mutex_destroy(&data->sources_mutex);
	pthread_mutex_destroy(&data->audio_sources_mutex);
	pthread_mutex_destroy(&data->displays_mutex);
	pthread_mutex_destroy(&data->outputs_mutex);
	pthread_mutex_destroy(&data->encoders_mutex);
	pthread_mutex_destroy(&data->services_mutex);
}

static const char *obs_signals[] = {
	"void source_create(ptr source)",
	"void source_destroy(ptr source)",
	"void source_remove(ptr source)",
	"void source_save(ptr source)",
	"void source_load(ptr source)",
	"void source_activate(ptr source)",
	"void source_deactivate(ptr source)",
	"void source_show(ptr source)",
	"void source_hide(ptr source)",
	"void source_rename(ptr source, string new_name, string prev_name)",
	"void source_volume(ptr source, in out float volume)",
	"void source_volume_level(ptr source, float level, float magnitude, "
		"float peak)",
	"void source_transition_start(ptr source)",
	"void source_transition_video_stop(ptr source)",
	"void source_transition_stop(ptr source)",

	"void channel_change(int channel, in out ptr source, ptr prev_source)",
	"void master_volume(in out float volume)",

	"void hotkey_layout_change()",
	"void hotkey_register(ptr hotkey)",
	"void hotkey_unregister(ptr hotkey)",
	"void hotkey_bindings_changed(ptr hotkey)",

	NULL
};

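/*
 * Usage sketch (illustrative, not from the original file): a front-end can
 * subscribe to any of the signals declared above through the core signal
 * handler, e.g.:
 *
 *     static void on_source_create(void *param, calldata_t *cd)
 *     {
 *         obs_source_t *source = calldata_ptr(cd, "source");
 *         // ... react to the newly created source ...
 *         UNUSED_PARAMETER(param);
 *     }
 *
 *     signal_handler_connect(obs_get_signal_handler(), "source_create",
 *                            on_source_create, NULL);
 */
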
static inline bool obs_init_handlers(void)
{
	obs->signals = signal_handler_create();
	if (!obs->signals)
		return false;

	obs->procs = proc_handler_create();
	if (!obs->procs)
		return false;

	return signal_handler_add_array(obs->signals, obs_signals);
}

static pthread_once_t obs_pthread_once_init_token = PTHREAD_ONCE_INIT;

static inline bool obs_init_hotkeys(void)
{
	struct obs_core_hotkeys *hotkeys = &obs->hotkeys;
	pthread_mutexattr_t attr;
	bool success = false;

	assert(hotkeys != NULL);

	da_init(hotkeys->hotkeys);
	hotkeys->signals = obs->signals;
	hotkeys->name_map_init_token = obs_pthread_once_init_token;
	hotkeys->mute = bstrdup("Mute");
	hotkeys->unmute = bstrdup("Unmute");
	hotkeys->push_to_mute = bstrdup("Push-to-mute");
	hotkeys->push_to_talk = bstrdup("Push-to-talk");
	hotkeys->sceneitem_show = bstrdup("Show '%1'");
	hotkeys->sceneitem_hide = bstrdup("Hide '%1'");

	if (!obs_hotkeys_platform_init(hotkeys))
		return false;

	if (pthread_mutexattr_init(&attr) != 0)
		return false;
	if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) != 0)
		goto fail;
	if (pthread_mutex_init(&hotkeys->mutex, &attr) != 0)
		goto fail;

	if (os_event_init(&hotkeys->stop_event, OS_EVENT_TYPE_MANUAL) != 0)
		goto fail;
	if (pthread_create(&hotkeys->hotkey_thread, NULL,
			obs_hotkey_thread, NULL))
		goto fail;

	hotkeys->hotkey_thread_initialized = true;

	success = true;

fail:
	pthread_mutexattr_destroy(&attr);
	return success;
}

static inline void stop_hotkeys(void)
|
|
|
|
{
|
|
|
|
struct obs_core_hotkeys *hotkeys = &obs->hotkeys;
|
|
|
|
void *thread_ret;
|
|
|
|
|
|
|
|
if (hotkeys->hotkey_thread_initialized) {
|
|
|
|
os_event_signal(hotkeys->stop_event);
|
|
|
|
pthread_join(hotkeys->hotkey_thread, &thread_ret);
|
|
|
|
hotkeys->hotkey_thread_initialized = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
os_event_destroy(hotkeys->stop_event);
|
|
|
|
obs_hotkeys_free();
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void obs_free_hotkeys(void)
|
|
|
|
{
|
|
|
|
struct obs_core_hotkeys *hotkeys = &obs->hotkeys;
|
|
|
|
|
2015-04-30 18:22:12 -07:00
|
|
|
bfree(hotkeys->mute);
|
|
|
|
bfree(hotkeys->unmute);
|
|
|
|
bfree(hotkeys->push_to_mute);
|
|
|
|
bfree(hotkeys->push_to_talk);
|
2015-05-08 18:33:55 -07:00
|
|
|
bfree(hotkeys->sceneitem_show);
|
|
|
|
bfree(hotkeys->sceneitem_hide);
|
2015-04-30 18:22:12 -07:00
|
|
|
|
2014-11-01 13:41:17 -07:00
|
|
|
obs_hotkey_name_map_free();
|
|
|
|
|
|
|
|
obs_hotkeys_platform_free(hotkeys);
|
|
|
|
pthread_mutex_destroy(&hotkeys->mutex);
|
|
|
|
}
|
|
|
|
|
2014-04-26 23:47:50 -07:00
|
|
|
extern const struct obs_source_info scene_info;
|
|
|
|
|
2014-07-12 00:21:06 -07:00
|
|
|
extern void log_system_info(void);
|
|
|
|
|
2015-08-09 05:09:07 -07:00
|
|
|
static bool obs_init(const char *locale, const char *module_config_path,
|
|
|
|
profiler_name_store_t *store)
|
2013-11-20 14:00:16 -08:00
|
|
|
{
|
2014-02-09 11:34:07 -08:00
|
|
|
obs = bzalloc(sizeof(struct obs_core));
|
2013-10-14 12:37:52 -07:00
|
|
|
|
2015-08-02 03:06:00 -07:00
|
|
|
obs->name_store_owned = !store;
|
|
|
|
obs->name_store = store ? store : profiler_name_store_create();
|
|
|
|
if (!obs->name_store) {
|
|
|
|
blog(LOG_ERROR, "Couldn't create profiler name store");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-07-12 00:21:06 -07:00
|
|
|
log_system_info();
|
|
|
|
|
2014-04-26 23:47:50 -07:00
|
|
|
if (!obs_init_data())
|
|
|
|
return false;
|
|
|
|
if (!obs_init_handlers())
|
|
|
|
return false;
|
2014-11-01 13:41:17 -07:00
|
|
|
if (!obs_init_hotkeys())
|
|
|
|
return false;
|
2014-04-26 23:47:50 -07:00
|
|
|
|
2015-08-09 05:09:07 -07:00
|
|
|
if (module_config_path)
|
|
|
|
obs->module_config_path = bstrdup(module_config_path);
|
2014-06-25 00:21:16 -07:00
|
|
|
obs->locale = bstrdup(locale);
|
2014-04-26 23:47:50 -07:00
|
|
|
obs_register_source(&scene_info);
|
(API Change) Refactor module handling
Changed API:
- char *obs_find_plugin_file(const char *sub_path);
Changed to: char *obs_module_file(const char *file);
Change it so you no longer need to specify a sub-path such as:
obs_find_plugin_file("module_name/file.ext")
Instead, the module data path is now handled automatically so all you need
to do is:
obs_module_file("file.ext")
- int obs_load_module(const char *name);
Changed to: int obs_open_module(obs_module_t *module,
const char *path,
const char *data_path);
bool obs_init_module(obs_module_t module);
Change the module loading API so that if the front-end chooses, it can
load modules directly from a specified path, and associate a data
directory with it on the spot.
The module will not be initialized immediately; obs_init_module must
be called on the module pointer in order to fully initialize the
module. This is done so a module can be disabled by the front-end if
it so chooses.
New API:
- void obs_add_module_path(const char *bin, const char *data);
These functions allow you to specify new module search paths to add,
and allow you to search through them, or optionally just load all
modules from them. If the string %module% is included, it will
replace it with the module's name when that string is used as a
lookup. Data paths are now directly added to the module's internal
storage structure, and when obs_find_module_file is used, it will look
up the pointer to the obs_module structure and get its data directory
that way.
Example:
obs_add_module_path("/opt/obs/my-modules/%module%/bin",
"/opt/obs/my-modules/%module%/data");
This would cause it to additionally look for the binary of a
hypothetical module named "foo" at /opt/obs/my-modules/foo/bin/foo.so
(or libfoo.so), and then look for the data in
/opt/obs/my-modules/foo/data.
This gives the front-end more flexibility for handling third-party
plugin modules, or handling all plugin modules in a custom way.
- void obs_find_modules(obs_find_module_callback_t callback, void
*param);
This searches the existing paths for modules and calls the callback
function when any are found. Useful for plugin management and custom
handling of the paths by the front-end if desired.
- void obs_load_all_modules(void);
Search through the paths and both loads and initializes all modules
automatically without custom handling.
- void obs_enum_modules(obs_enum_module_callback_t callback,
void *param);
Enumerates currently opened modules.
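A rough front-end sketch of the loading flow described above; the paths
are examples, and the MODULE_SUCCESS return code and the exact module
handle type are assumptions to verify against obs-module.h:

    obs_add_module_path("/opt/obs/my-modules/%module%/bin",
                        "/opt/obs/my-modules/%module%/data");

    /* either load and initialize everything found on the search paths... */
    obs_load_all_modules();

    /* ...or open a single module explicitly, so the front-end can skip
     * initializing modules the user has disabled */
    obs_module_t module;
    if (obs_open_module(&module, "/opt/obs/my-modules/foo/bin/foo.so",
                "/opt/obs/my-modules/foo/data") == MODULE_SUCCESS)
        obs_init_module(module);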
2014-07-27 12:00:11 -07:00
|
|
|
add_default_module_paths();
|
2014-04-26 23:47:50 -07:00
|
|
|
return true;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2015-01-02 05:57:06 -08:00
|
|
|
#ifdef _WIN32
|
|
|
|
extern void initialize_crash_handler(void);
|
2015-09-22 11:27:38 -07:00
|
|
|
extern void initialize_com(void);
|
|
|
|
extern void uninitialize_com(void);
|
2015-01-02 05:57:06 -08:00
|
|
|
#endif
|
|
|
|
|
2015-07-10 23:04:46 -07:00
|
|
|
static const char *obs_startup_name = "obs_startup";
|
2015-08-09 05:09:07 -07:00
|
|
|
bool obs_startup(const char *locale, const char *module_config_path,
|
|
|
|
profiler_name_store_t *store)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2013-11-20 14:00:16 -08:00
|
|
|
bool success;
|
2013-09-30 19:37:13 -07:00
|
|
|
|
2015-07-10 23:04:46 -07:00
|
|
|
profile_start(obs_startup_name);
|
|
|
|
|
2013-11-20 14:00:16 -08:00
|
|
|
if (obs) {
|
2014-02-23 21:39:33 -08:00
|
|
|
blog(LOG_WARNING, "Tried to call obs_startup more than once");
|
2013-11-20 14:00:16 -08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-01-02 05:57:06 -08:00
|
|
|
#ifdef _WIN32
|
|
|
|
initialize_crash_handler();
|
2015-09-22 11:27:38 -07:00
|
|
|
initialize_com();
|
2015-01-02 05:57:06 -08:00
|
|
|
#endif
|
|
|
|
|
2015-08-09 05:09:07 -07:00
|
|
|
success = obs_init(locale, module_config_path, store);
|
2015-07-10 23:04:46 -07:00
|
|
|
profile_end(obs_startup_name);
|
2013-11-20 14:00:16 -08:00
|
|
|
if (!success)
|
|
|
|
obs_shutdown();
|
|
|
|
|
|
|
|
return success;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
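Front-end usage brackets everything between these two calls; a minimal
sketch (the locale string is an example, and NULL is accepted for both
the module config path and the profiler name store):

    if (!obs_startup("en-US", NULL, NULL))
        return 1;

    /* load modules, obs_reset_video()/obs_reset_audio(), create sources... */

    obs_shutdown();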
|
|
|
|
|
2013-11-20 14:00:16 -08:00
|
|
|
void obs_shutdown(void)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2014-07-27 12:00:11 -07:00
|
|
|
struct obs_module *module;
|
|
|
|
|
2013-09-30 19:37:13 -07:00
|
|
|
if (!obs)
|
|
|
|
return;
|
|
|
|
|
2015-09-15 22:39:11 -07:00
|
|
|
#define FREE_REGISTERED_TYPES(structure, list) \
|
|
|
|
do { \
|
|
|
|
for (size_t i = 0; i < list.num; i++) { \
|
|
|
|
struct structure *item = &list.array[i]; \
|
|
|
|
if (item->type_data && item->free_type_data) \
|
|
|
|
item->free_type_data(item->type_data); \
|
|
|
|
} \
|
|
|
|
da_free(list); \
|
|
|
|
} while (false)
|
|
|
|
|
2015-12-29 15:25:45 -08:00
|
|
|
FREE_REGISTERED_TYPES(obs_source_info, obs->source_types);
|
2015-09-15 22:39:11 -07:00
|
|
|
FREE_REGISTERED_TYPES(obs_source_info, obs->input_types);
|
|
|
|
FREE_REGISTERED_TYPES(obs_source_info, obs->filter_types);
|
|
|
|
FREE_REGISTERED_TYPES(obs_source_info, obs->transition_types);
|
|
|
|
FREE_REGISTERED_TYPES(obs_output_info, obs->output_types);
|
|
|
|
FREE_REGISTERED_TYPES(obs_encoder_info, obs->encoder_types);
|
|
|
|
FREE_REGISTERED_TYPES(obs_service_info, obs->service_types);
|
|
|
|
FREE_REGISTERED_TYPES(obs_modal_ui, obs->modal_ui_callbacks);
|
|
|
|
FREE_REGISTERED_TYPES(obs_modeless_ui, obs->modeless_ui_callbacks);
|
|
|
|
|
|
|
|
#undef FREE_REGISTERED_TYPES
|
2013-09-30 19:37:13 -07:00
|
|
|
|
2014-02-13 07:58:31 -08:00
|
|
|
stop_video();
|
2014-11-01 13:41:17 -07:00
|
|
|
stop_hotkeys();
|
2014-02-13 07:58:31 -08:00
|
|
|
|
libobs: Implement new audio subsystem
The new audio subsystem fixes two issues:
- The primary issue it fixes is the ability for parent sources to
intercept the audio of child sources, and do custom processing on
them. The main reason for this was the ability to do custom
cross-fading in transitions, but it's also useful for things such as
side-chain effects, applying audio effects to entire scenes, applying
scene-specific audio filters on sub-sources, and other such
possibilities.
- The secondary issue that needed fixing was audio buffering.
Previously, audio buffering was always a fixed buffer size, so it
would always have exactly a certain number of milliseconds of audio
buffering (and thus output delay). Instead, it now dynamically
increases audio buffering only as necessary, minimizing output delay,
and removing the need for users to have to worry about an audio
buffering setting.
The new design makes it so that audio from the leaves of the scene graph
flows to the root nodes, and can be intercepted by parent sources. Each
audio source handles its own buffering, and each audio tick a specific
number of audio frames are popped from the front of the circular buffer
on each audio source. Composite sources (such as scenes) can access the
audio for child sources and do custom processing or mixing on that
audio. Composite sources use the audio_render callback of sources to do
synchronous or deferred audio processing per audio tick. Things like
scenes now mix audio from their sub-sources.
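As a rough sketch of the composite-source side of this design, an
audio_render callback might pull one child's pending audio and mix it
into the output each tick. The child lookup helper is hypothetical, and
the exact struct and callback signatures should be verified against
obs.h and media-io/audio-io.h.

    static bool example_audio_render(void *data, uint64_t *ts_out,
            struct obs_source_audio_mix *audio_output, uint32_t mixers,
            size_t channels, size_t sample_rate)
    {
        obs_source_t *child = example_get_child(data); /* hypothetical lookup */
        struct obs_source_audio_mix child_audio;

        if (!child || obs_source_audio_pending(child))
            return false;

        obs_source_get_audio_mix(child, &child_audio);

        for (size_t mix = 0; mix < MAX_AUDIO_MIXES; mix++) {
            if ((mixers & (1 << mix)) == 0)
                continue;

            for (size_t ch = 0; ch < channels; ch++) {
                float *out = audio_output->output[mix].data[ch];
                float *in  = child_audio.output[mix].data[ch];

                /* per-sample processing (e.g. a crossfade) goes here */
                for (size_t i = 0; i < AUDIO_OUTPUT_FRAMES; i++)
                    out[i] += in[i];
            }
        }

        *ts_out = obs_source_get_audio_timestamp(child);
        UNUSED_PARAMETER(sample_rate);
        return true;
    }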
2015-12-20 03:06:35 -08:00
|
|
|
obs_free_audio();
|
2013-11-20 14:00:16 -08:00
|
|
|
obs_free_data();
|
|
|
|
obs_free_video();
|
2014-11-01 13:41:17 -07:00
|
|
|
obs_free_hotkeys();
|
2013-11-26 21:20:11 -08:00
|
|
|
obs_free_graphics();
|
2013-12-26 22:10:15 -08:00
|
|
|
proc_handler_destroy(obs->procs);
|
|
|
|
signal_handler_destroy(obs->signals);
|
2013-09-30 19:37:13 -07:00
|
|
|
|
2014-07-27 12:00:11 -07:00
|
|
|
module = obs->first_module;
|
|
|
|
while (module) {
|
|
|
|
struct obs_module *next = module->next;
|
|
|
|
free_module(module);
|
|
|
|
module = next;
|
|
|
|
}
|
|
|
|
obs->first_module = NULL;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < obs->module_paths.num; i++)
|
|
|
|
free_module_path(obs->module_paths.array+i);
|
|
|
|
da_free(obs->module_paths);
|
2013-09-30 19:37:13 -07:00
|
|
|
|
2015-08-02 03:06:00 -07:00
|
|
|
if (obs->name_store_owned)
|
|
|
|
profiler_name_store_free(obs->name_store);
|
|
|
|
|
2015-08-09 05:09:07 -07:00
|
|
|
bfree(obs->module_config_path);
|
2014-06-25 00:21:16 -07:00
|
|
|
bfree(obs->locale);
|
2013-09-30 19:37:13 -07:00
|
|
|
bfree(obs);
|
2013-10-14 12:37:52 -07:00
|
|
|
obs = NULL;
|
2015-09-22 11:27:38 -07:00
|
|
|
|
|
|
|
#ifdef _WIN32
|
|
|
|
uninitialize_com();
|
|
|
|
#endif
|
2013-10-14 12:37:52 -07:00
|
|
|
}
|
|
|
|
|
2014-02-13 07:58:31 -08:00
|
|
|
bool obs_initialized(void)
|
|
|
|
{
|
|
|
|
return obs != NULL;
|
|
|
|
}
|
|
|
|
|
2014-07-27 09:43:32 -07:00
|
|
|
uint32_t obs_get_version(void)
|
|
|
|
{
|
|
|
|
return LIBOBS_API_VER;
|
|
|
|
}
|
|
|
|
|
2014-06-25 00:21:16 -07:00
|
|
|
void obs_set_locale(const char *locale)
|
|
|
|
{
|
2014-07-27 12:00:11 -07:00
|
|
|
struct obs_module *module;
|
2014-06-25 00:21:16 -07:00
|
|
|
if (!obs)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (obs->locale)
|
|
|
|
bfree(obs->locale);
|
|
|
|
obs->locale = bstrdup(locale);
|
2014-06-25 00:22:54 -07:00
|
|
|
|
2014-07-27 12:00:11 -07:00
|
|
|
module = obs->first_module;
|
|
|
|
while (module) {
|
2014-06-25 00:22:54 -07:00
|
|
|
if (module->set_locale)
|
|
|
|
module->set_locale(locale);
|
2014-07-27 12:00:11 -07:00
|
|
|
|
|
|
|
module = module->next;
|
2014-06-25 00:22:54 -07:00
|
|
|
}
|
2014-06-25 00:21:16 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
const char *obs_get_locale(void)
|
|
|
|
{
|
|
|
|
return obs ? obs->locale : NULL;
|
|
|
|
}
|
|
|
|
|
2014-07-20 17:40:57 -07:00
|
|
|
#define OBS_SIZE_MIN 2
|
|
|
|
#define OBS_SIZE_MAX (32 * 1024)
|
|
|
|
|
|
|
|
static inline bool size_valid(uint32_t width, uint32_t height)
|
2013-10-14 12:37:52 -07:00
|
|
|
{
|
2014-07-20 17:40:57 -07:00
|
|
|
return (width >= OBS_SIZE_MIN && height >= OBS_SIZE_MIN &&
|
|
|
|
width <= OBS_SIZE_MAX && height <= OBS_SIZE_MAX);
|
|
|
|
}
|
|
|
|
|
|
|
|
int obs_reset_video(struct obs_video_info *ovi)
|
|
|
|
{
|
|
|
|
if (!obs) return OBS_VIDEO_FAIL;
|
2014-02-22 19:14:19 -08:00
|
|
|
|
|
|
|
/* don't allow changing of video settings if active. */
|
|
|
|
if (obs->video.video && video_output_active(obs->video.video))
|
2014-07-20 17:40:57 -07:00
|
|
|
return OBS_VIDEO_CURRENTLY_ACTIVE;
|
|
|
|
|
|
|
|
if (!size_valid(ovi->output_width, ovi->output_height) ||
|
|
|
|
!size_valid(ovi->base_width, ovi->base_height))
|
|
|
|
return OBS_VIDEO_INVALID_PARAM;
|
2014-02-22 19:14:19 -08:00
|
|
|
|
2014-02-05 20:03:06 -08:00
|
|
|
struct obs_core_video *video = &obs->video;
|
2013-11-26 21:20:11 -08:00
|
|
|
|
2014-02-22 19:14:19 -08:00
|
|
|
stop_video();
|
2013-11-20 14:00:16 -08:00
|
|
|
obs_free_video();
|
2013-10-14 12:37:52 -07:00
|
|
|
|
2013-11-26 21:20:11 -08:00
|
|
|
if (!ovi) {
|
|
|
|
obs_free_graphics();
|
2014-07-20 17:40:57 -07:00
|
|
|
return OBS_VIDEO_SUCCESS;
|
2013-11-26 21:20:11 -08:00
|
|
|
}
|
|
|
|
|
2014-07-13 05:01:53 -07:00
|
|
|
/* align to multiple-of-two and SSE alignment sizes */
|
|
|
|
ovi->output_width &= 0xFFFFFFFC;
|
|
|
|
ovi->output_height &= 0xFFFFFFFE;
|
|
|
|
|
2014-07-20 17:40:57 -07:00
|
|
|
if (!video->graphics) {
|
|
|
|
int errorcode = obs_init_graphics(ovi);
|
2014-10-14 01:44:59 -07:00
|
|
|
if (errorcode != OBS_VIDEO_SUCCESS) {
|
|
|
|
obs_free_graphics();
|
2014-07-20 17:40:57 -07:00
|
|
|
return errorcode;
|
2014-10-14 01:44:59 -07:00
|
|
|
}
|
2014-07-20 17:40:57 -07:00
|
|
|
}
|
2013-11-26 21:20:11 -08:00
|
|
|
|
2015-07-05 23:51:16 -07:00
|
|
|
blog(LOG_INFO, "---------------------------------");
|
2014-07-13 05:02:44 -07:00
|
|
|
blog(LOG_INFO, "video settings reset:\n"
|
|
|
|
"\tbase resolution: %dx%d\n"
|
|
|
|
"\toutput resolution: %dx%d\n"
|
2015-04-17 20:04:30 -07:00
|
|
|
"\tfps: %d/%d\n"
|
|
|
|
"\tformat: %s",
|
2014-07-13 05:02:44 -07:00
|
|
|
ovi->base_width, ovi->base_height,
|
|
|
|
ovi->output_width, ovi->output_height,
|
2015-04-17 20:04:30 -07:00
|
|
|
ovi->fps_num, ovi->fps_den,
|
|
|
|
get_video_format_name(ovi->output_format));
|
2014-07-13 05:02:44 -07:00
|
|
|
|
2013-11-26 21:20:11 -08:00
|
|
|
return obs_init_video(ovi);
|
2013-10-14 12:37:52 -07:00
|
|
|
}
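A typical front-end reset, using the same fields the log message above
prints; the concrete numbers, the graphics_module name, and NV12 are
illustrative assumptions rather than requirements:

    struct obs_video_info ovi = {0};

    ovi.graphics_module = "libobs-opengl"; /* assumption: platform module name */
    ovi.base_width      = 1920;
    ovi.base_height     = 1080;
    ovi.output_width    = 1280;
    ovi.output_height   = 720;
    ovi.fps_num         = 30;
    ovi.fps_den         = 1;
    ovi.output_format   = VIDEO_FORMAT_NV12;

    if (obs_reset_video(&ovi) != OBS_VIDEO_SUCCESS)
        blog(LOG_ERROR, "obs_reset_video failed");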
|
|
|
|
|
2015-03-07 04:47:12 -08:00
|
|
|
bool obs_reset_audio(const struct obs_audio_info *oai)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2015-03-07 04:47:12 -08:00
|
|
|
struct audio_output_info ai;
|
|
|
|
|
2014-02-22 19:14:19 -08:00
|
|
|
if (!obs) return false;
|
|
|
|
|
|
|
|
/* don't allow changing of audio settings if active. */
|
|
|
|
if (obs->audio.audio && audio_output_active(obs->audio.audio))
|
|
|
|
return false;
|
|
|
|
|
2014-01-09 18:08:20 -08:00
|
|
|
obs_free_audio();
|
2015-03-07 04:47:12 -08:00
|
|
|
if (!oai)
|
2013-11-26 21:20:11 -08:00
|
|
|
return true;
|
|
|
|
|
2015-03-07 04:47:12 -08:00
|
|
|
ai.name = "Audio";
|
|
|
|
ai.samples_per_sec = oai->samples_per_sec;
|
|
|
|
ai.format = AUDIO_FORMAT_FLOAT_PLANAR;
|
|
|
|
ai.speakers = oai->speakers;
|
2015-12-20 03:06:35 -08:00
|
|
|
ai.input_callback = audio_callback;
|
2015-03-07 04:47:12 -08:00
|
|
|
|
2015-07-05 23:51:16 -07:00
|
|
|
blog(LOG_INFO, "---------------------------------");
|
2014-07-13 05:02:44 -07:00
|
|
|
blog(LOG_INFO, "audio settings reset:\n"
|
|
|
|
"\tsamples per sec: %d\n"
|
2015-12-20 03:06:35 -08:00
|
|
|
"\tspeakers: %d",
|
2015-03-07 04:47:12 -08:00
|
|
|
(int)ai.samples_per_sec,
|
2015-12-20 03:06:35 -08:00
|
|
|
(int)ai.speakers);
|
2014-07-13 05:02:44 -07:00
|
|
|
|
2015-03-07 04:47:12 -08:00
|
|
|
return obs_init_audio(&ai);
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2013-12-06 05:38:19 -08:00
|
|
|
bool obs_get_video_info(struct obs_video_info *ovi)
|
|
|
|
{
|
2014-02-05 20:03:06 -08:00
|
|
|
struct obs_core_video *video = &obs->video;
|
Simplify media i/o interfaces
Completely revamped the entire media i/o data and handlers. The
original idea was to have a system that would have connecting media
inputs and outputs, but at a certain point I realized that this was an
unnecessary complexity for what we wanted to do. (Also, it reminded me
of directshow filters, and I HATE directshow with a passion, and
wouldn't wish it upon my greatest enemy)
Now, audio/video outputs are connected to directly, with better callback
handlers, and will eventually have the ability to automatically handle
conversions such as 4:4:4 to 4:2:0 when connecting to an input that uses
them. Doing this will allow the video/audio i/o handlers to also
prevent duplicate conversion, as well as make it easier/simpler to use.
My true goal for this is to make output and encoder plugins as simple to
create as possible. I want to be able to create an output
plugin with almost no real hassle of having to worry about image
conversions, media inputs/outputs, etc. A plugin developer shouldn't
have to handle that sort of stuff when he/she doesn't really need to.
Plugins will be able to simply create a callback via obs_video() and/or
obs_audio(), and they will automatically receive the audio/video data in
the formats requested via a simple callback, without needing to do
almost anything else at all.
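A hedged sketch of what "connecting directly" looks like for raw video:
register a callback on the core video output. video_output_connect,
video_output_disconnect and struct video_data live in
media-io/video-io.h, and their exact signatures should be checked there.

    static void receive_video(void *param, struct video_data *frame)
    {
        /* frame->data[], frame->linesize[] and frame->timestamp carry the
         * raw output frame */
        UNUSED_PARAMETER(param);
        UNUSED_PARAMETER(frame);
    }

    static void start_raw_video(void)
    {
        /* NULL conversion: receive frames in the core output format */
        video_output_connect(obs_get_video(), NULL, receive_video, NULL);
    }

    static void stop_raw_video(void)
    {
        video_output_disconnect(obs_get_video(), receive_video, NULL);
    }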
2014-01-14 00:58:47 -08:00
|
|
|
const struct video_output_info *info;
|
2013-12-06 05:38:19 -08:00
|
|
|
|
2013-12-22 22:40:07 -08:00
|
|
|
if (!obs || !video->graphics)
|
2013-12-06 05:38:19 -08:00
|
|
|
return false;
|
|
|
|
|
2014-08-05 15:07:54 -07:00
|
|
|
info = video_output_get_info(video->video);
|
2014-10-05 13:33:05 -07:00
|
|
|
if (!info)
|
|
|
|
return false;
|
2013-12-06 05:38:19 -08:00
|
|
|
|
|
|
|
memset(ovi, 0, sizeof(struct obs_video_info));
|
|
|
|
ovi->base_width = video->base_width;
|
|
|
|
ovi->base_height = video->base_height;
|
2014-12-11 19:51:30 -08:00
|
|
|
ovi->gpu_conversion = video->gpu_conversion;
|
2014-12-14 23:45:44 -08:00
|
|
|
ovi->scale_type = video->scale_type;
|
2014-12-11 19:51:30 -08:00
|
|
|
ovi->colorspace = info->colorspace;
|
|
|
|
ovi->range = info->range;
|
2013-12-06 05:38:19 -08:00
|
|
|
ovi->output_width = info->width;
|
|
|
|
ovi->output_height = info->height;
|
2014-01-19 02:16:41 -08:00
|
|
|
ovi->output_format = info->format;
|
2013-12-06 05:38:19 -08:00
|
|
|
ovi->fps_num = info->fps_num;
|
|
|
|
ovi->fps_den = info->fps_den;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-03-07 04:47:12 -08:00
|
|
|
bool obs_get_audio_info(struct obs_audio_info *oai)
|
2013-12-06 05:38:19 -08:00
|
|
|
{
|
2014-02-05 20:03:06 -08:00
|
|
|
struct obs_core_audio *audio = &obs->audio;
|
2014-01-14 00:58:47 -08:00
|
|
|
const struct audio_output_info *info;
|
2013-12-06 05:38:19 -08:00
|
|
|
|
2015-03-07 04:47:12 -08:00
|
|
|
if (!obs || !oai || !audio->audio)
|
2013-12-06 05:38:19 -08:00
|
|
|
return false;
|
|
|
|
|
2014-08-05 15:07:54 -07:00
|
|
|
info = audio_output_get_info(audio->audio);
|
2013-12-06 05:38:19 -08:00
|
|
|
|
2015-03-07 04:47:12 -08:00
|
|
|
oai->samples_per_sec = info->samples_per_sec;
|
|
|
|
oai->speakers = info->speakers;
|
2013-12-06 05:38:19 -08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-12-29 16:08:36 -08:00
|
|
|
bool obs_enum_source_types(size_t idx, const char **id)
|
|
|
|
{
|
|
|
|
if (!obs) return false;
|
|
|
|
|
|
|
|
if (idx >= obs->source_types.num)
|
|
|
|
return false;
|
|
|
|
*id = obs->source_types.array[idx].id;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-12-20 16:23:19 -08:00
|
|
|
bool obs_enum_input_types(size_t idx, const char **id)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2014-02-23 21:39:33 -08:00
|
|
|
if (!obs) return false;
|
|
|
|
|
2013-09-30 19:37:13 -07:00
|
|
|
if (idx >= obs->input_types.num)
|
|
|
|
return false;
|
2013-12-20 16:23:19 -08:00
|
|
|
*id = obs->input_types.array[idx].id;
|
2013-09-30 19:37:13 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-12-20 16:23:19 -08:00
|
|
|
bool obs_enum_filter_types(size_t idx, const char **id)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2014-02-23 21:39:33 -08:00
|
|
|
if (!obs) return false;
|
|
|
|
|
2013-09-30 19:37:13 -07:00
|
|
|
if (idx >= obs->filter_types.num)
|
|
|
|
return false;
|
2013-12-20 16:23:19 -08:00
|
|
|
*id = obs->filter_types.array[idx].id;
|
2013-09-30 19:37:13 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-12-20 16:23:19 -08:00
|
|
|
bool obs_enum_transition_types(size_t idx, const char **id)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2014-02-23 21:39:33 -08:00
|
|
|
if (!obs) return false;
|
|
|
|
|
2013-09-30 19:37:13 -07:00
|
|
|
if (idx >= obs->transition_types.num)
|
|
|
|
return false;
|
2013-12-20 16:23:19 -08:00
|
|
|
*id = obs->transition_types.array[idx].id;
|
2013-09-30 19:37:13 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-12-20 16:23:19 -08:00
|
|
|
bool obs_enum_output_types(size_t idx, const char **id)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2014-02-23 21:39:33 -08:00
|
|
|
if (!obs) return false;
|
|
|
|
|
2013-09-30 19:37:13 -07:00
|
|
|
if (idx >= obs->output_types.num)
|
|
|
|
return false;
|
2013-12-20 16:23:19 -08:00
|
|
|
*id = obs->output_types.array[idx].id;
|
2013-09-30 19:37:13 -07:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduced some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was...
pointless). Anyway, the linked list info is also stored in the shared
context data structure.
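As with the other object types, registered services can be enumerated by
index; a minimal sketch using the enumeration function defined below,
with an illustrative log message:

    const char *id;
    for (size_t i = 0; obs_enum_service_types(i, &id); i++)
        blog(LOG_INFO, "registered service type: %s", id);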
2014-04-19 20:38:53 -07:00
|
|
|
bool obs_enum_encoder_types(size_t idx, const char **id)
|
|
|
|
{
|
|
|
|
if (!obs) return false;
|
|
|
|
|
|
|
|
if (idx >= obs->encoder_types.num)
|
|
|
|
return false;
|
|
|
|
*id = obs->encoder_types.array[idx].id;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool obs_enum_service_types(size_t idx, const char **id)
|
|
|
|
{
|
|
|
|
if (!obs) return false;
|
|
|
|
|
|
|
|
if (idx >= obs->service_types.num)
|
|
|
|
return false;
|
|
|
|
*id = obs->service_types.array[idx].id;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-08-04 05:48:58 -07:00
|
|
|
void obs_enter_graphics(void)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2014-08-04 05:48:58 -07:00
|
|
|
if (obs && obs->video.graphics)
|
2014-08-07 23:42:07 -07:00
|
|
|
gs_enter_context(obs->video.graphics);
|
2014-08-04 05:48:58 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
void obs_leave_graphics(void)
|
|
|
|
{
|
|
|
|
if (obs && obs->video.graphics)
|
2014-08-07 23:42:07 -07:00
|
|
|
gs_leave_context();
|
2013-09-30 19:37:13 -07:00
|
|
|
}
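A minimal usage sketch: any direct libobs-graphics call from a front-end
or plugin thread is wrapped in this pair. gs_texture_create and
gs_texture_destroy are shown only as an illustration of a
graphics-context call; the dimensions and format are arbitrary.

    obs_enter_graphics();

    gs_texture_t *tex = gs_texture_create(640, 360, GS_RGBA, 1, NULL, 0);
    /* ...use the texture... */
    gs_texture_destroy(tex);

    obs_leave_graphics();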
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
audio_t *obs_get_audio(void)
|
2014-01-14 00:58:47 -08:00
|
|
|
{
|
|
|
|
return (obs != NULL) ? obs->audio.audio : NULL;
|
|
|
|
}
|
|
|
|
|
2014-09-25 17:44:05 -07:00
|
|
|
video_t *obs_get_video(void)
|
2013-09-30 19:37:13 -07:00
|
|
|
{
|
2014-01-14 00:58:47 -08:00
|
|
|
return (obs != NULL) ? obs->video.video : NULL;
|
2013-09-30 19:37:13 -07:00
|
|
|
}
|
|
|
|
|
2014-01-31 23:49:50 -08:00
|
|
|
/* TODO: optimize this later so it's not just O(N) string lookups */
|
Revamp API and start using doxygen
The API used to be designed in such a way that it would expect
exports for each individual source/output/encoder/etc. You would export
functions for each and it would automatically load those functions based
on a specific naming scheme from the module.
The idea behind this was that I wanted to limit the usage of structures
in the API so only functions could be used. It was an interesting idea
in theory, but this idea turned out to be flawed in a number of ways:
1.) Requiring exports to create sources/outputs/encoders/etc meant that
you could not create them by any other means, which meant that
things like faruton's .net plugin would become difficult.
2.) Export function declarations could not be checked, therefore if you
created a function with the wrong parameters and parameter types,
the compiler wouldn't know how to check for that.
3.) Required overly complex load functions in libobs just to handle it.
It makes much more sense to just have a load function that you call
manually. Complexity is the bane of all good programs.
4.) It required that you have functions of specific names, which looked
and felt somewhat unsightly.
So, to fix these issues, I replaced it with a more commonly used API
scheme, seen in places like kernels and typical C libraries
with abstraction. You simply create a structure that contains the
callback definitions, and you pass it to a function to register that
definition (such as obs_register_source), which you call in the
obs_module_load of the module.
It will also automatically check the structure size and ensure that it
only loads the required values if the structure happened to add new
values in an API change.
The "main" source file for each module must include obs-module.h, and
must use OBS_DECLARE_MODULE() within that source file.
Also, started writing some doxygen documentation into the main library
headers. Will add more detailed documentation as I go.
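A minimal module following that scheme might look like the sketch below;
the source id and callbacks are hypothetical, and the exact
obs_source_info field names should be checked against the obs-source.h
of the version in use.

    #include <obs-module.h>

    OBS_DECLARE_MODULE()

    static const char *example_get_name(void *type_data)
    {
        UNUSED_PARAMETER(type_data);
        return "Example Source";
    }

    static void *example_create(obs_data_t *settings, obs_source_t *source)
    {
        UNUSED_PARAMETER(settings);
        UNUSED_PARAMETER(source);
        return bzalloc(1);
    }

    static void example_destroy(void *data)
    {
        bfree(data);
    }

    static struct obs_source_info example_source_info = {
        .id       = "example_source",
        .type     = OBS_SOURCE_TYPE_INPUT,
        .get_name = example_get_name,
        .create   = example_create,
        .destroy  = example_destroy,
    };

    bool obs_module_load(void)
    {
        obs_register_source(&example_source_info);
        return true;
    }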
2014-02-12 07:04:50 -08:00
|
|
|
static inline struct obs_modal_ui *get_modal_ui_callback(const char *id,
|
2014-01-31 23:49:50 -08:00
|
|
|
const char *task, const char *target)
|
|
|
|
{
|
2014-02-12 07:04:50 -08:00
|
|
|
for (size_t i = 0; i < obs->modal_ui_callbacks.num; i++) {
|
|
|
|
struct obs_modal_ui *callback = obs->modal_ui_callbacks.array+i;
|
2014-01-31 23:49:50 -08:00
|
|
|
|
2014-02-12 07:04:50 -08:00
|
|
|
if (strcmp(callback->id, id) == 0 &&
|
2014-02-01 16:43:32 -08:00
|
|
|
strcmp(callback->task, task) == 0 &&
|
|
|
|
strcmp(callback->target, target) == 0)
|
2014-01-31 23:49:50 -08:00
|
|
|
return callback;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2014-02-12 07:04:50 -08:00
|
|
|
static inline struct obs_modeless_ui *get_modeless_ui_callback(const char *id,
|
2014-02-01 11:48:35 -08:00
|
|
|
const char *task, const char *target)
|
|
|
|
{
|
2014-02-12 07:04:50 -08:00
|
|
|
for (size_t i = 0; i < obs->modeless_ui_callbacks.num; i++) {
|
2014-02-01 16:43:32 -08:00
|
|
|
struct obs_modeless_ui *callback;
|
2014-02-12 07:04:50 -08:00
|
|
|
callback = obs->modeless_ui_callbacks.array+i;
|
2014-02-01 11:48:35 -08:00
|
|
|
|
Revamp API and start using doxygen
The API used to be designed in such a way to where it would expect
exports for each individual source/output/encoder/etc. You would export
functions for each and it would automatically load those functions based
on a specific naming scheme from the module.
The idea behind this was that I wanted to limit the usage of structures
in the API so only functions could be used. It was an interesting idea
in theory, but this idea turned out to be flawed in a number of ways:
1.) Requiring exports to create sources/outputs/encoders/etc meant that
you could not create them by any other means, which meant that
things like faruton's .net plugin would become difficult.
2.) Export function declarations could not be checked, therefore if you
created a function with the wrong parameters and parameter types,
the compiler wouldn't know how to check for that.
3.) Required overly complex load functions in libobs just to handle it.
It makes much more sense to just have a load function that you call
manually. Complexity is the bane of all good programs.
4.) It required that you have functions of specific names, which looked
and felt somewhat unsightly.
So, to fix these issues, I replaced it with a more commonly used API
scheme, seen commonly in places like kernels and typical C libraries
with abstraction. You simply create a structure that contains the
callback definitions, and you pass it to a function to register that
definition (such as obs_register_source), which you call in the
obs_module_load of the module.
It will also automatically check the structure size and ensure that it
only loads the required values if the structure happened to add new
values in an API change.
The "main" source file for each module must include obs-module.h, and
must use OBS_DECLARE_MODULE() within that source file.
Also, started writing some doxygen documentation in to the main library
headers. Will add more detailed documentation as I go.
2014-02-12 07:04:50 -08:00
|
|
|
if (strcmp(callback->id, id) == 0 &&
|
2014-02-01 16:43:32 -08:00
|
|
|
strcmp(callback->task, task) == 0 &&
|
|
|
|
strcmp(callback->target, target) == 0)
|
2014-02-01 11:48:35 -08:00
|
|
|
return callback;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
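
These UI callbacks are themselves just structures registered by modules, in
the same spirit as obs_register_source. Below is a rough sketch of what
registering a modeless UI entry might look like; the id/task/target strings
and the create function are made up, and obs_register_modeless_ui is assumed
to be the registration counterpart that libobs exports for this lookup.

/* Hypothetical modeless UI registration, e.g. called from a module's
 * obs_module_load().  The strings must match what a frontend later passes
 * to obs_create_ui(). */
static void *my_panel_create(void *object, void *ui_data)
{
	/* 'object' is the data pointer supplied by the caller; 'ui_data' is
	 * typically a parent window handle.  Return the created window. */
	UNUSED_PARAMETER(object);
	return ui_data;
}

static struct obs_modeless_ui my_panel_ui = {
	.id     = "my_source",
	.task   = "panel",
	.target = "qt",
	.create = my_panel_create,
};

static void register_my_panel(void)
{
	obs_register_modeless_ui(&my_panel_ui);
}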
int obs_exec_ui(const char *name, const char *task, const char *target,
		void *data, void *ui_data)
{
	struct obs_modal_ui *callback;
	int errorcode = OBS_UI_NOTFOUND;

	if (!obs) return errorcode;

	callback = get_modal_ui_callback(name, task, target);
	if (callback) {
		bool success = callback->exec(data, ui_data);
		errorcode = success ? OBS_UI_SUCCESS : OBS_UI_CANCEL;
	}

	return errorcode;
}
void *obs_create_ui(const char *name, const char *task, const char *target,
		void *data, void *ui_data)
{
	struct obs_modeless_ui *callback;

	if (!obs) return NULL;

	callback = get_modeless_ui_callback(name, task, target);
	return callback ? callback->create(data, ui_data) : NULL;
}
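
From the frontend side, a caller looks these entries up by the same
id/task/target triple. A minimal sketch, assuming a hypothetical "my_source"
id with "config" and "panel" tasks registered for a "qt" target:

/* Hypothetical frontend calls; the ids, tasks and target are examples and
 * only succeed if a module registered matching callbacks. */
static void show_source_ui(void *parent_window)
{
	int code = obs_exec_ui("my_source", "config", "qt",
			NULL, parent_window);
	if (code == OBS_UI_NOTFOUND)
		blog(LOG_WARNING, "no modal UI for my_source/config/qt");
	else if (code == OBS_UI_CANCEL)
		blog(LOG_INFO, "dialog cancelled");

	/* A modeless UI is created and returned rather than executed. */
	void *panel = obs_create_ui("my_source", "panel", "qt",
			NULL, parent_window);
	if (!panel)
		blog(LOG_WARNING, "no modeless UI for my_source/panel/qt");
}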
obs_source_t *obs_get_output_source(uint32_t channel)
{
	if (!obs) return NULL;
	return obs_view_get_source(&obs->data.main_view, channel);
}
void obs_set_output_source(uint32_t channel, obs_source_t *source)
{
	assert(channel < MAX_CHANNELS);

	if (!obs) return;
	if (channel >= MAX_CHANNELS) return;

	struct obs_source *prev_source;
	struct obs_view *view = &obs->data.main_view;
	struct calldata params = {0};

	pthread_mutex_lock(&view->channels_mutex);

	obs_source_addref(source);

	prev_source = view->channels[channel];

	calldata_set_int(&params, "channel", channel);
	calldata_set_ptr(&params, "prev_source", prev_source);
	calldata_set_ptr(&params, "source", source);
	signal_handler_signal(obs->signals, "channel_change", &params);
	calldata_get_ptr(&params, "source", &source);
	calldata_free(&params);

	view->channels[channel] = source;

	pthread_mutex_unlock(&view->channels_mutex);

	if (source)
		obs_source_activate(source, MAIN_VIEW);

	if (prev_source) {
		obs_source_deactivate(prev_source, MAIN_VIEW);
		obs_source_release(prev_source);
	}
}
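
A short usage sketch for the two channel functions above. The channel number
and scene name are arbitrary examples; the lookup is assumed to hand back its
own reference (taken inside obs_view_get_source), so the caller releases it
when done.

/* Hypothetical channel assignment; channel 0 and the scene name are
 * example values only. */
static void set_main_scene(void)
{
	obs_scene_t  *scene  = obs_scene_create("example scene");
	obs_source_t *source = obs_scene_get_source(scene);

	obs_set_output_source(0, source); /* channel takes its own reference */

	obs_source_t *current = obs_get_output_source(0);
	if (current) {
		/* ... inspect or use the active channel source ... */
		obs_source_release(current); /* balance the lookup reference */
	}

	obs_set_output_source(0, NULL);   /* clear the channel again */
	obs_scene_release(scene);
}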
Add preliminary output/encoder interface
- First, I redid the output interface for libobs. I feel like it's
going in a pretty good direction in terms of design.
Right now, the design is so that outputs and encoders are separate.
One or more outputs can connect to a specific encoder to receive its
data, or the output can connect directly to raw data from libobs
output itself, if the output doesn't want to use a designated encoder.
Data is received via callbacks set when you connect to the encoder or
raw output. Multiple outputs can receive the data from a single
encoder context if need be (such as for streaming to multiple channels
at once, and/or recording with the same data).
When an encoder is first connected to, it will connect to raw output,
and start encoding. Additional connections will receive that same
data being encoded as well after that. When the last encoder has
disconnected, it will stop encoding. If for some reason the encoder
needs to stop, it will use the callback with NULL to signal that
encoding has stopped. Some of these things may be subject to change
in the future, though it feels pretty good with this design so far.
Will have to see how well it works out in practice versus theory.
- Second, Started adding preliminary RTMP/x264 output plugin code.
To speed things up, I might just make a direct raw->FFmpeg output to
create a quick output plugin that we can start using for testing all
the subsystems.

void obs_enum_sources(bool (*enum_proc)(void*, obs_source_t*), void *param)
{
	obs_source_t *source;

	if (!obs) return;

	pthread_mutex_lock(&obs->data.sources_mutex);
	source = obs->data.first_source;

	while (source) {
		obs_source_t *next_source =
			(obs_source_t*)source->context.next;

		if ((source->info.type == OBS_SOURCE_TYPE_INPUT) != 0 &&
				!enum_proc(param, source))
			break;

		source = next_source;
	}

	pthread_mutex_unlock(&obs->data.sources_mutex);
}
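
As an illustration of the enumeration callback contract (return true to keep
going, false to stop), here is a small sketch; the callback name and the
logging are made up for the example.

/* Hypothetical callback: count input sources and log their names. */
static bool count_source(void *param, obs_source_t *source)
{
	size_t *count = param;
	(*count)++;
	blog(LOG_INFO, "source: %s", obs_source_get_name(source));
	return true; /* returning false stops the enumeration early */
}

static void log_all_sources(void)
{
	size_t count = 0;
	obs_enum_sources(count_source, &count);
	blog(LOG_INFO, "%zu input sources enumerated", count);
}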
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
static inline void obs_enum(void *pstart, pthread_mutex_t *mutex, void *proc,
		void *param)
{
	struct obs_context_data **start = pstart, *context;
	bool (*enum_proc)(void*, void*) = proc;

	assert(start);
	assert(mutex);
	assert(enum_proc);

	pthread_mutex_lock(mutex);

	context = *start;
	while (context) {
		if (!enum_proc(param, context))
			break;

		context = context->next;
	}

	pthread_mutex_unlock(mutex);
}
void obs_enum_outputs(bool (*enum_proc)(void*, obs_output_t*), void *param)
{
	if (!obs) return;
	obs_enum(&obs->data.first_output, &obs->data.outputs_mutex,
			enum_proc, param);
}
void obs_enum_encoders(bool (*enum_proc)(void*, obs_encoder_t*), void *param)
{
	if (!obs) return;
	obs_enum(&obs->data.first_encoder, &obs->data.encoders_mutex,
			enum_proc, param);
}
void obs_enum_services(bool (*enum_proc)(void*, obs_service_t*), void *param)
{
	if (!obs) return;
	obs_enum(&obs->data.first_service, &obs->data.services_mutex,
			enum_proc, param);
}
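
The three typed wrappers above rely on each object type beginning with the
shared struct obs_context_data (as the generic obs_enum walk assumes), which
is what lets one loop cover outputs, encoders, and services; the typed
signatures exist only so callers get type-checked callbacks. A usage sketch,
with a hypothetical callback:

/* Hypothetical callback: stop every output that is currently active. */
static bool stop_if_active(void *param, obs_output_t *output)
{
	UNUSED_PARAMETER(param);
	if (obs_output_active(output))
		obs_output_stop(output);
	return true; /* keep walking the output list */
}

static void stop_all_outputs(void)
{
	obs_enum_outputs(stop_if_active, NULL);
}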
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
static inline void *get_context_by_name(void *vfirst, const char *name,
		pthread_mutex_t *mutex, void *(*addref)(void*))
{
	struct obs_context_data **first = vfirst;
	struct obs_context_data *context;

	pthread_mutex_lock(mutex);

	context = *first;
	while (context) {
		if (!context->private && strcmp(context->name, name) == 0) {
			context = addref(context);
			break;
		}
		context = context->next;
	}

	pthread_mutex_unlock(mutex);
	return context;
}
static inline void *obs_source_addref_safe_(void *ref)
{
	return obs_source_get_ref(ref);
}

static inline void *obs_output_addref_safe_(void *ref)
{
	return obs_output_get_ref(ref);
}

static inline void *obs_encoder_addref_safe_(void *ref)
{
	return obs_encoder_get_ref(ref);
}

static inline void *obs_service_addref_safe_(void *ref)
{
	return obs_service_get_ref(ref);
}

static inline void *obs_id_(void *data)
{
	return data;
}
obs_source_t *obs_get_source_by_name(const char *name)
{
	if (!obs) return NULL;
	return get_context_by_name(&obs->data.first_source, name,
			&obs->data.sources_mutex, obs_source_addref_safe_);
}
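
The by-name lookups return a new reference taken through the addref-safe
helpers above (which, as the naming suggests, only add a reference while it
is still safe to do so), so a successful lookup must be balanced with a
release. A short sketch; the source name is an arbitrary example:

/* Hypothetical lookup; "my camera" is an example name. */
static void find_camera(void)
{
	obs_source_t *src = obs_get_source_by_name("my camera");
	if (!src) {
		blog(LOG_WARNING, "no source named 'my camera'");
		return;
	}

	blog(LOG_INFO, "found '%s'", obs_source_get_name(src));
	obs_source_release(src); /* balance the reference from the lookup */
}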
obs_output_t *obs_get_output_by_name(const char *name)
{
	if (!obs) return NULL;
	return get_context_by_name(&obs->data.first_output, name,
			&obs->data.outputs_mutex, obs_output_addref_safe_);
}
obs_encoder_t *obs_get_encoder_by_name(const char *name)
{
	if (!obs) return NULL;
	return get_context_by_name(&obs->data.first_encoder, name,
			&obs->data.encoders_mutex, obs_encoder_addref_safe_);
}
|
|
|
obs_service_t *obs_get_service_by_name(const char *name)
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
{
|
|
|
|
if (!obs) return NULL;
|
|
|
|
return get_context_by_name(&obs->data.first_service, name,
|
2015-05-03 17:07:43 -07:00
|
|
|
&obs->data.services_mutex, obs_service_addref_safe_);
|
obs-studio UI: Implement stream settings UI
- Updated the services API so that it links up with an output and
the output gets data from that service rather than via settings.
This allows the service context to have control over how an output is
used, and makes it so that the URL/key/etc isn't necessarily some
static setting.
Also, if the service is attached to an output, it will stick around
until the output is destroyed.
- The settings interface has been updated so that it can allow the
usage of service plugins. What this means is that now you can create
a service plugin that can control aspects of the stream, and it
allows each service to create their own user interface if they create
a service plugin module.
- Testing out saving of current service information. Saves/loads from
JSON in to obs_data_t, seems to be working quite nicely, and the
service object information is saved/preserved on exit, and loaded
again on startup.
- I agonized over the settings user interface for days, and eventually
I just decided that the only way that users weren't going to be
fumbling over options was to split up the settings in to simple/basic
output, pre-configured, and then advanced for advanced use (such as
multiple outputs or services, which I'll implement later).
This was particularly painful to really design right, I wanted more
features and wanted to include everything in one interface but
ultimately just realized from experience that users are just not
technically knowledgable about it and will end up fumbling with the
settings rather than getting things done.
Basically, what this means is that casual users only have to enter in
about 3 things to configure their stream: Stream key, audio bitrate,
and video bitrate. I am really happy with this interface for those
types of users, but it definitely won't be sufficient for advanced
usage or for custom outputs, so that stuff will have to be separated.
- Improved the JSON usage for the 'common streaming services' context,
I realized that JSON arrays are there to ensure sorting, while
forgetting that general items are optimized for hashing. So
basically I'm just using arrays now to sort items in it.
2014-04-24 01:49:07 -07:00
|
|
|
}
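
/* Example (illustrative sketch, not part of libobs): the *_by_name lookups
 * above add a reference to the returned object, so the caller must release
 * it.  The encoder name used here is hypothetical.
 *
 *     obs_encoder_t *enc = obs_get_encoder_by_name("my encoder");
 *     if (enc) {
 *             // ... use the encoder ...
 *             obs_encoder_release(enc);
 *     }
 */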

gs_effect_t *obs_get_base_effect(enum obs_base_effect effect)
{
        if (!obs) return NULL;

        switch (effect) {
        case OBS_EFFECT_DEFAULT:
                return obs->video.default_effect;
        case OBS_EFFECT_DEFAULT_RECT:
                return obs->video.default_rect_effect;
        case OBS_EFFECT_OPAQUE:
                return obs->video.opaque_effect;
        case OBS_EFFECT_SOLID:
                return obs->video.solid_effect;
        case OBS_EFFECT_BICUBIC:
                return obs->video.bicubic_effect;
        case OBS_EFFECT_LANCZOS:
                return obs->video.lanczos_effect;
        case OBS_EFFECT_BILINEAR_LOWRES:
                return obs->video.bilinear_lowres_effect;
        case OBS_EFFECT_PREMULTIPLIED_ALPHA:
                return obs->video.premultiplied_alpha_effect;
        }

        return NULL;
}
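
/* Example (illustrative sketch): a custom render callback might draw a
 * texture with one of the base effects; "tex" is a hypothetical texture
 * owned by the caller.
 *
 *     gs_effect_t *effect = obs_get_base_effect(OBS_EFFECT_PREMULTIPLIED_ALPHA);
 *     while (gs_effect_loop(effect, "Draw"))
 *             obs_source_draw(tex, 0, 0, 0, 0, false);
 */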

/* DEPRECATED */
gs_effect_t *obs_get_default_rect_effect(void)
{
        if (!obs) return NULL;
        return obs->video.default_rect_effect;
}

signal_handler_t *obs_get_signal_handler(void)
{
        if (!obs) return NULL;
        return obs->signals;
}

proc_handler_t *obs_get_proc_handler(void)
{
        if (!obs) return NULL;
        return obs->procs;
}
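
/* Example (illustrative sketch): a front-end can subscribe to global core
 * signals through this handler; "on_source_create" is a hypothetical
 * callback defined by the caller.
 *
 *     signal_handler_t *sh = obs_get_signal_handler();
 *     signal_handler_connect(sh, "source_create", on_source_create, NULL);
 */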

void obs_render_main_view(void)
{
        if (!obs) return;
        obs_view_render(&obs->data.main_view);
}

void obs_set_master_volume(float volume)
{
        struct calldata data = {0};

        if (!obs) return;

        calldata_set_float(&data, "volume", volume);
        signal_handler_signal(obs->signals, "master_volume", &data);
        volume = (float)calldata_float(&data, "volume");
        calldata_free(&data);

        obs->audio.user_volume = volume;
}

float obs_get_master_volume(void)
{
        return obs ? obs->audio.user_volume : 0.0f;
}
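
/* Example (illustrative sketch): because obs_set_master_volume() reads the
 * "volume" field back after emitting the "master_volume" signal, a connected
 * handler can observe or clamp the value before it is applied; the handler
 * name here is hypothetical.
 *
 *     static void clamp_volume(void *data, calldata_t *cd)
 *     {
 *             if (calldata_float(cd, "volume") > 1.0)
 *                     calldata_set_float(cd, "volume", 1.0);
 *     }
 *
 *     signal_handler_connect(obs_get_signal_handler(), "master_volume",
 *                     clamp_volume, NULL);
 *     obs_set_master_volume(1.5f);
 */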

static obs_source_t *obs_load_source_type(obs_data_t *source_data)
{
        obs_data_array_t *filters = obs_data_get_array(source_data, "filters");
        obs_source_t *source;
        const char *name = obs_data_get_string(source_data, "name");
        const char *id = obs_data_get_string(source_data, "id");
        obs_data_t *settings = obs_data_get_obj(source_data, "settings");
        obs_data_t *hotkeys = obs_data_get_obj(source_data, "hotkeys");
        double volume;
        int64_t sync;
        uint32_t flags;
        uint32_t mixers;
        int di_order;
        int di_mode;

        source = obs_source_create(id, name, settings, hotkeys);

        obs_data_release(hotkeys);

        obs_data_set_default_double(source_data, "volume", 1.0);
        volume = obs_data_get_double(source_data, "volume");
        obs_source_set_volume(source, (float)volume);

        sync = obs_data_get_int(source_data, "sync");
        obs_source_set_sync_offset(source, sync);

        obs_data_set_default_int(source_data, "mixers", 0xF);
        mixers = (uint32_t)obs_data_get_int(source_data, "mixers");
        obs_source_set_audio_mixers(source, mixers);

        obs_data_set_default_int(source_data, "flags", source->default_flags);
        flags = (uint32_t)obs_data_get_int(source_data, "flags");
        obs_source_set_flags(source, flags);

        obs_data_set_default_bool(source_data, "enabled", true);
        obs_source_set_enabled(source,
                        obs_data_get_bool(source_data, "enabled"));

        obs_data_set_default_bool(source_data, "muted", false);
        obs_source_set_muted(source, obs_data_get_bool(source_data, "muted"));

        obs_data_set_default_bool(source_data, "push-to-mute", false);
        obs_source_enable_push_to_mute(source,
                        obs_data_get_bool(source_data, "push-to-mute"));

        obs_data_set_default_int(source_data, "push-to-mute-delay", 0);
        obs_source_set_push_to_mute_delay(source,
                        obs_data_get_int(source_data, "push-to-mute-delay"));

        obs_data_set_default_bool(source_data, "push-to-talk", false);
        obs_source_enable_push_to_talk(source,
                        obs_data_get_bool(source_data, "push-to-talk"));

        obs_data_set_default_int(source_data, "push-to-talk-delay", 0);
        obs_source_set_push_to_talk_delay(source,
                        obs_data_get_int(source_data, "push-to-talk-delay"));

        di_mode = (int)obs_data_get_int(source_data, "deinterlace_mode");
        obs_source_set_deinterlace_mode(source,
                        (enum obs_deinterlace_mode)di_mode);

        di_order = (int)obs_data_get_int(source_data,
                        "deinterlace_field_order");
        obs_source_set_deinterlace_field_order(source,
                        (enum obs_deinterlace_field_order)di_order);

        if (filters) {
                size_t count = obs_data_array_count(filters);

                for (size_t i = 0; i < count; i++) {
                        obs_data_t *filter_data =
                                obs_data_array_item(filters, i);

                        obs_source_t *filter = obs_load_source_type(
                                        filter_data);
                        if (filter) {
                                obs_source_filter_add(source, filter);
                                obs_source_release(filter);
                        }

                        obs_data_release(filter_data);
                }

                obs_data_array_release(filters);
        }

        obs_data_release(settings);

        return source;
}

obs_source_t *obs_load_source(obs_data_t *source_data)
{
        return obs_load_source_type(source_data);
}
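
/* Example (illustrative sketch): a front-end could rebuild a saved source
 * from serialized JSON; the file path is hypothetical.
 *
 *     obs_data_t *data = obs_data_create_from_json_file("source.json");
 *     obs_source_t *source = obs_load_source(data);
 *     obs_data_release(data);
 *     // ... use the source ...
 *     obs_source_release(source);
 */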

void obs_load_sources(obs_data_array_t *array, obs_load_source_cb cb,
                void *private_data)
{
        if (!obs) return;

        struct obs_core_data *data = &obs->data;
        DARRAY(obs_source_t*) sources;
        size_t count;
        size_t i;

        da_init(sources);

        count = obs_data_array_count(array);
        da_reserve(sources, count);

        pthread_mutex_lock(&data->sources_mutex);

        for (i = 0; i < count; i++) {
                obs_data_t *source_data = obs_data_array_item(array, i);
                obs_source_t *source = obs_load_source(source_data);

                da_push_back(sources, &source);

                obs_data_release(source_data);
        }

        /* tell sources that we want to load */
        for (i = 0; i < sources.num; i++) {
                obs_source_t *source = sources.array[i];
                obs_data_t *source_data = obs_data_array_item(array, i);
                if (source) {
                        if (source->info.type == OBS_SOURCE_TYPE_TRANSITION)
                                obs_transition_load(source, source_data);
                        obs_source_load(source);
                        cb(private_data, source);
                }
                obs_data_release(source_data);
        }

        for (i = 0; i < sources.num; i++)
                obs_source_release(sources.array[i]);

        pthread_mutex_unlock(&data->sources_mutex);

        da_free(sources);
}
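
/* Example (illustrative sketch): loading an array of saved sources and
 * receiving a callback per loaded source; "array", "ui" and "add_to_ui" are
 * hypothetical front-end names.
 *
 *     static void source_loaded(void *param, obs_source_t *source)
 *     {
 *             add_to_ui(param, source);
 *     }
 *
 *     obs_load_sources(array, source_loaded, ui);
 */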

obs_data_t *obs_save_source(obs_source_t *source)
{
        obs_data_array_t *filters = obs_data_array_create();
        obs_data_t *source_data = obs_data_create();
        obs_data_t *settings = obs_source_get_settings(source);
        obs_data_t *hotkey_data = source->context.hotkey_data;
        obs_data_t *hotkeys;
        float volume = obs_source_get_volume(source);
        uint32_t mixers = obs_source_get_audio_mixers(source);
        int64_t sync = obs_source_get_sync_offset(source);
        uint32_t flags = obs_source_get_flags(source);
        const char *name = obs_source_get_name(source);
        const char *id = obs_source_get_id(source);
        bool enabled = obs_source_enabled(source);
        bool muted = obs_source_muted(source);
        bool push_to_mute = obs_source_push_to_mute_enabled(source);
        uint64_t ptm_delay = obs_source_get_push_to_mute_delay(source);
        bool push_to_talk = obs_source_push_to_talk_enabled(source);
        uint64_t ptt_delay = obs_source_get_push_to_talk_delay(source);
        int di_mode = (int)obs_source_get_deinterlace_mode(source);
        int di_order =
                (int)obs_source_get_deinterlace_field_order(source);

        obs_source_save(source);
        hotkeys = obs_hotkeys_save_source(source);

        if (hotkeys) {
                obs_data_release(hotkey_data);
                source->context.hotkey_data = hotkeys;
                hotkey_data = hotkeys;
        }

        obs_data_set_string(source_data, "name", name);
        obs_data_set_string(source_data, "id", id);
        obs_data_set_obj   (source_data, "settings", settings);
        obs_data_set_int   (source_data, "mixers", mixers);
        obs_data_set_int   (source_data, "sync", sync);
        obs_data_set_int   (source_data, "flags", flags);
        obs_data_set_double(source_data, "volume", volume);
        obs_data_set_bool  (source_data, "enabled", enabled);
        obs_data_set_bool  (source_data, "muted", muted);
        obs_data_set_bool  (source_data, "push-to-mute", push_to_mute);
        obs_data_set_int   (source_data, "push-to-mute-delay", ptm_delay);
        obs_data_set_bool  (source_data, "push-to-talk", push_to_talk);
        obs_data_set_int   (source_data, "push-to-talk-delay", ptt_delay);
        obs_data_set_obj   (source_data, "hotkeys", hotkey_data);
        obs_data_set_int   (source_data, "deinterlace_mode", di_mode);
        obs_data_set_int   (source_data, "deinterlace_field_order", di_order);

        if (source->info.type == OBS_SOURCE_TYPE_TRANSITION)
                obs_transition_save(source, source_data);

        pthread_mutex_lock(&source->filter_mutex);

        if (source->filters.num) {
                for (size_t i = source->filters.num; i > 0; i--) {
                        obs_source_t *filter = source->filters.array[i - 1];
                        obs_data_t *filter_data = obs_save_source(filter);
                        obs_data_array_push_back(filters, filter_data);
                        obs_data_release(filter_data);
                }

                obs_data_set_array(source_data, "filters", filters);
        }

        pthread_mutex_unlock(&source->filter_mutex);

        obs_data_release(settings);
        obs_data_array_release(filters);

        return source_data;
}

obs_data_array_t *obs_save_sources_filtered(obs_save_source_filter_cb cb,
                void *data_)
{
        if (!obs) return NULL;

        struct obs_core_data *data = &obs->data;
        obs_data_array_t *array;
        obs_source_t *source;

        array = obs_data_array_create();

        pthread_mutex_lock(&data->sources_mutex);

        source = data->first_source;

        while (source) {
                if (source->info.type != OBS_SOURCE_TYPE_FILTER &&
                                !source->context.private &&
                                cb(data_, source)) {
                        obs_data_t *source_data = obs_save_source(source);

                        obs_data_array_push_back(array, source_data);
                        obs_data_release(source_data);
                }

                source = (obs_source_t*)source->context.next;
        }

        pthread_mutex_unlock(&data->sources_mutex);

        return array;
}

static bool save_source_filter(void *data, obs_source_t *source)
{
        UNUSED_PARAMETER(data);
        UNUSED_PARAMETER(source);
        return true;
}

obs_data_array_t *obs_save_sources(void)
{
        return obs_save_sources_filtered(save_source_filter, NULL);
}
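
/* Example (illustrative sketch): serializing every public source to a JSON
 * file, e.g. on shutdown; the file name is hypothetical.
 *
 *     obs_data_array_t *sources = obs_save_sources();
 *     obs_data_t *root = obs_data_create();
 *     obs_data_set_array(root, "sources", sources);
 *     obs_data_save_json(root, "scene-collection.json");
 *     obs_data_release(root);
 *     obs_data_array_release(sources);
 */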

/* ensures that names are never blank */
static inline char *dup_name(const char *name, bool private)
{
        if (private && !name)
                return NULL;

        if (!name || !*name) {
                struct dstr unnamed = {0};
                dstr_printf(&unnamed, "__unnamed%04lld",
                                obs->data.unnamed_index++);

                return unnamed.array;
        } else {
                return bstrdup(name);
        }
}

static inline bool obs_context_data_init_wrap(
                struct obs_context_data *context,
                enum obs_obj_type type,
                obs_data_t *settings,
                const char *name,
                obs_data_t *hotkey_data,
                bool private)
{
        assert(context);
        memset(context, 0, sizeof(*context));
        context->private = private;
        context->type = type;

        pthread_mutex_init_value(&context->rename_cache_mutex);
        if (pthread_mutex_init(&context->rename_cache_mutex, NULL) < 0)
                return false;

        context->signals = signal_handler_create();
        if (!context->signals)
                return false;

        context->procs = proc_handler_create();
        if (!context->procs)
                return false;

        context->name = dup_name(name, private);
        context->settings = obs_data_newref(settings);
        context->hotkey_data = obs_data_newref(hotkey_data);
        return true;
}

bool obs_context_data_init(
                struct obs_context_data *context,
                enum obs_obj_type type,
                obs_data_t *settings,
                const char *name,
                obs_data_t *hotkey_data,
                bool private)
{
        if (obs_context_data_init_wrap(context, type, settings, name,
                                hotkey_data, private)) {
                return true;
        } else {
                obs_context_data_free(context);
                return false;
        }
}
|
|
|
|
|
|
|
|
void obs_context_data_free(struct obs_context_data *context)
|
|
|
|
{
|
2014-11-01 13:41:17 -07:00
|
|
|
obs_hotkeys_context_release(context);
|
libobs: Add services API, reduce repeated code
Add API for streaming services. The services API simplifies the
creation of custom service features and user interface.
Custom streaming services later on will be able to do things such as:
- Be able to use service-specific APIs via modules, allowing a more
direct means of communicating with the service and requesting or
setting service-specific information
- Get URL/stream key via other means of authentication such as OAuth,
or be able to build custom URLs for services that require that sort
of thing.
- Query information (such as viewer count, chat, follower
notifications, and other information)
- Set channel information (such as current game, current channel title,
activating commercials)
Also, I reduce some repeated code that was used for all libobs objects.
This includes the name of the object, the private data, settings, as
well as the signal and procedure handlers.
I also switched to using linked lists for the global object lists,
rather than using an array of pointers (you could say it was..
pointless.) ..Anyway, the linked list info is also stored in the shared
context data structure.
2014-04-19 20:38:53 -07:00
|
|
|
signal_handler_destroy(context->signals);
|
|
|
|
proc_handler_destroy(context->procs);
|
|
|
|
obs_data_release(context->settings);
|
|
|
|
obs_context_data_remove(context);
|
2014-07-02 20:58:30 -07:00
|
|
|
pthread_mutex_destroy(&context->rename_cache_mutex);
|
        bfree(context->name);

        for (size_t i = 0; i < context->rename_cache.num; i++)
                bfree(context->rename_cache.array[i]);
        da_free(context->rename_cache);

        memset(context, 0, sizeof(*context));
}

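/*
 * The list links live in the shared context itself: "next" points at
 * the next context, and "prev_next" points at the previous node's
 * "next" field (or at the list head for the first node).  Writing
 * through prev_next is therefore enough to splice a node in or out
 * without walking the list, which is what the insert/remove helpers
 * below do while holding the list mutex.
 */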
void obs_context_data_insert(struct obs_context_data *context,
                pthread_mutex_t *mutex, void *pfirst)
{
        struct obs_context_data **first = pfirst;

        assert(context);
        assert(mutex);
        assert(first);

        context->mutex = mutex;

        pthread_mutex_lock(mutex);
        context->prev_next = first;
        context->next      = *first;
        *first             = context;
        if (context->next)
                context->next->prev_next = &context->next;
        pthread_mutex_unlock(mutex);
}

void obs_context_data_remove(struct obs_context_data *context)
{
        if (context && context->mutex) {
                pthread_mutex_lock(context->mutex);
                if (context->prev_next)
                        *context->prev_next = context->next;
                if (context->next)
                        context->next->prev_next = context->prev_next;
                pthread_mutex_unlock(context->mutex);

                context->mutex = NULL;
        }
}

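/*
 * Renaming does not free the old name immediately: the old string is
 * pushed onto context->rename_cache and only released later in
 * obs_context_data_free, presumably so that name pointers handed out
 * before the rename remain valid for the lifetime of the object.
 */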
void obs_context_data_setname(struct obs_context_data *context,
                const char *name)
{
        pthread_mutex_lock(&context->rename_cache_mutex);

        if (context->name)
                da_push_back(context->rename_cache, &context->name);
        context->name = dup_name(name, context->private);

        pthread_mutex_unlock(&context->rename_cache_mutex);
}

profiler_name_store_t *obs_get_profiler_name_store(void)
{
        if (!obs)
                return NULL;

        return obs->name_store;
}

uint64_t obs_get_video_frame_time(void)
{
        return obs ? obs->video.video_time : 0;
}

enum obs_obj_type obs_obj_get_type(void *obj)
{
        struct obs_context_data *context = obj;
        return context ? context->type : OBS_OBJ_TYPE_INVALID;
}

const char *obs_obj_get_id(void *obj)
{
        struct obs_context_data *context = obj;
        if (!context)
                return NULL;

        switch (context->type) {
        case OBS_OBJ_TYPE_SOURCE:  return ((obs_source_t*)obj)->info.id;
        case OBS_OBJ_TYPE_OUTPUT:  return ((obs_output_t*)obj)->info.id;
        case OBS_OBJ_TYPE_ENCODER: return ((obs_encoder_t*)obj)->info.id;
        case OBS_OBJ_TYPE_SERVICE: return ((obs_service_t*)obj)->info.id;
        default:;
        }

        return NULL;
}
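
/*
 * Hypothetical caller (not part of libobs): log_obj_info is invented
 * for illustration and only uses the two helpers above plus blog(),
 * libobs' logging function.
 */
static void log_obj_info(void *obj)
{
        enum obs_obj_type type = obs_obj_get_type(obj);
        const char *id = obs_obj_get_id(obj);

        blog(LOG_INFO, "object type=%d, id=%s",
                        (int)type, id ? id : "(none)");
}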