/******************************************************************************
    Copyright (C) 2013-2014 by Hugh Bailey <obs.jim@gmail.com>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/

#include <time.h>
#include <stdlib.h>

#include "obs.h"
#include "obs-internal.h"
#include "graphics/vec4.h"
#include "media-io/format-conversion.h"
#include "media-io/video-frame.h"

#ifdef _WIN32
#define WIN32_MEAN_AND_LEAN
#include <windows.h>
#endif

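/* Ticks libobs once per frame: computes the seconds elapsed since last_time,
 * runs all registered tick callbacks, then calls obs_source_video_tick() on
 * every source in the core source list.  Returns cur_time so the caller can
 * pass it back in as last_time on the next frame. */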
static uint64_t tick_sources(uint64_t cur_time, uint64_t last_time)
{
        struct obs_core_data *data = &obs->data;
        struct obs_source *source;
        uint64_t delta_time;
        float seconds;

        if (!last_time)
                last_time = cur_time -
                            video_output_get_frame_time(obs->video.video);

        delta_time = cur_time - last_time;
        seconds = (float)((double)delta_time / 1000000000.0);

        /* ------------------------------------- */
        /* call tick callbacks */

        pthread_mutex_lock(&obs->data.draw_callbacks_mutex);

        for (size_t i = obs->data.tick_callbacks.num; i > 0; i--) {
                struct tick_callback *callback;
                callback = obs->data.tick_callbacks.array + (i - 1);
                callback->tick(callback->param, seconds);
        }

        pthread_mutex_unlock(&obs->data.draw_callbacks_mutex);

        /* ------------------------------------- */
        /* call the tick function of each source */

        pthread_mutex_lock(&data->sources_mutex);

        source = data->first_source;
        while (source) {
                struct obs_source *cur_source = obs_source_get_ref(source);
                source = (struct obs_source *)source->context.next;

                if (cur_source) {
                        obs_source_video_tick(cur_source, seconds);
                        obs_source_release(cur_source);
                }
        }

        pthread_mutex_unlock(&data->sources_mutex);

        return cur_time;
}

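/* render_displays() walks the list of registered displays (previews and
 * projectors) and renders each one inside the graphics context. */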
/* in obs-display.c */
extern void render_display(struct obs_display *display);

static inline void render_displays(void)
{
        struct obs_display *display;

        if (!obs->data.valid)
                return;

        gs_enter_context(obs->video.graphics);

        /* render extra displays/swaps */
        pthread_mutex_lock(&obs->data.displays_mutex);

        display = obs->data.first_display;
        while (display) {
                render_display(display);
                display = display->next;
        }

        pthread_mutex_unlock(&obs->data.displays_mutex);

        gs_leave_context();
}

static inline void set_render_size(uint32_t width, uint32_t height)
{
        gs_enable_depth_test(false);
        gs_set_cull_mode(GS_NEITHER);

        gs_ortho(0.0f, (float)width, 0.0f, (float)height, -100.0f, 100.0f);
        gs_set_viewport(0, 0, width, height);
}

static inline void unmap_last_surface(struct obs_core_video *video)
{
        for (int c = 0; c < NUM_CHANNELS; ++c) {
                if (video->mapped_surfaces[c]) {
                        gs_stagesurface_unmap(video->mapped_surfaces[c]);
                        video->mapped_surfaces[c] = NULL;
                }
        }
}

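/* Renders the main view and any registered draw callbacks into the core
 * render texture at the base (canvas) resolution. */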
static const char *render_main_texture_name = "render_main_texture";
static inline void render_main_texture(struct obs_core_video *video)
{
        profile_start(render_main_texture_name);
        GS_DEBUG_MARKER_BEGIN(GS_DEBUG_COLOR_MAIN_TEXTURE,
                              render_main_texture_name);

        struct vec4 clear_color;
        vec4_set(&clear_color, 0.0f, 0.0f, 0.0f, 0.0f);

        gs_set_render_target(video->render_texture, NULL);
        gs_clear(GS_CLEAR_COLOR, &clear_color, 1.0f, 0);

        set_render_size(video->base_width, video->base_height);

        pthread_mutex_lock(&obs->data.draw_callbacks_mutex);

        for (size_t i = obs->data.draw_callbacks.num; i > 0; i--) {
                struct draw_callback *callback;
                callback = obs->data.draw_callbacks.array + (i - 1);

                callback->draw(callback->param, video->base_width,
                               video->base_height);
        }

        pthread_mutex_unlock(&obs->data.draw_callbacks_mutex);

        obs_view_render(&obs->data.main_view);

        video->texture_rendered = true;

        GS_DEBUG_MARKER_END();
        profile_end(render_main_texture_name);
}

static inline gs_effect_t *
get_scale_effect_internal(struct obs_core_video *video)
{
        /* if the dimension is under half the size of the original image,
         * bicubic/lanczos can't sample enough pixels to create an accurate
         * image, so use the bilinear low resolution effect instead */
        if (video->output_width < (video->base_width / 2) &&
            video->output_height < (video->base_height / 2)) {
                return video->bilinear_lowres_effect;
        }

        switch (video->scale_type) {
        case OBS_SCALE_BILINEAR:
                return video->default_effect;
        case OBS_SCALE_LANCZOS:
                return video->lanczos_effect;
        case OBS_SCALE_AREA:
                return video->area_effect;
        case OBS_SCALE_BICUBIC:
        default:;
        }

        return video->bicubic_effect;
}

static inline bool resolution_close(struct obs_core_video *video,
                                    uint32_t width, uint32_t height)
{
        long width_cmp = (long)video->base_width - (long)width;
        long height_cmp = (long)video->base_height - (long)height;

        return labs(width_cmp) <= 16 && labs(height_cmp) <= 16;
}

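/* Chooses the effect used to scale the base canvas to the output resolution;
 * when the two resolutions are within 16 pixels of each other the default
 * effect is used and no dedicated scaler is needed. */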
static inline gs_effect_t *get_scale_effect(struct obs_core_video *video,
                                            uint32_t width, uint32_t height)
{
        if (resolution_close(video, width, height)) {
                return video->default_effect;
        } else {
                /* if the scale method couldn't be loaded, use either bicubic
                 * or bilinear by default */
                gs_effect_t *effect = get_scale_effect_internal(video);
                if (!effect)
                        effect = !!video->bicubic_effect
                                         ? video->bicubic_effect
                                         : video->default_effect;
                return effect;
        }
}

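/* Scales the rendered base texture into the output texture.  When no rescale
 * and no format-specific technique is required, the render texture is
 * returned directly and the fullscreen pass is skipped. */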
static const char *render_output_texture_name = "render_output_texture";
static inline gs_texture_t *render_output_texture(struct obs_core_video *video)
{
        gs_texture_t *texture = video->render_texture;
        gs_texture_t *target = video->output_texture;
        uint32_t width = gs_texture_get_width(target);
        uint32_t height = gs_texture_get_height(target);

        gs_effect_t *effect = get_scale_effect(video, width, height);
        gs_technique_t *tech;

        if (video->ovi.output_format == VIDEO_FORMAT_RGBA) {
                tech = gs_effect_get_technique(effect, "DrawAlphaDivide");
        } else {
                if ((effect == video->default_effect) &&
                    (width == video->base_width) &&
                    (height == video->base_height))
                        return texture;

                tech = gs_effect_get_technique(effect, "Draw");
        }

        profile_start(render_output_texture_name);

        gs_eparam_t *image = gs_effect_get_param_by_name(effect, "image");
        gs_eparam_t *bres =
                gs_effect_get_param_by_name(effect, "base_dimension");
        gs_eparam_t *bres_i =
                gs_effect_get_param_by_name(effect, "base_dimension_i");
        size_t passes, i;

        gs_set_render_target(target, NULL);
        set_render_size(width, height);

        if (bres) {
                struct vec2 base;
                vec2_set(&base, (float)video->base_width,
                         (float)video->base_height);
                gs_effect_set_vec2(bres, &base);
        }

        if (bres_i) {
                struct vec2 base_i;
                vec2_set(&base_i, 1.0f / (float)video->base_width,
                         1.0f / (float)video->base_height);
                gs_effect_set_vec2(bres_i, &base_i);
        }

        gs_effect_set_texture(image, texture);

        gs_enable_blending(false);
        passes = gs_technique_begin(tech);
        for (i = 0; i < passes; i++) {
                gs_technique_begin_pass(tech, i);
                gs_draw_sprite(texture, 0, width, height);
                gs_technique_end_pass(tech);
        }
        gs_technique_end(tech);
        gs_enable_blending(true);

        profile_end(render_output_texture_name);

        return target;
}

static void render_convert_plane(gs_effect_t *effect, gs_texture_t *target,
                                 const char *tech_name)
{
        gs_technique_t *tech = gs_effect_get_technique(effect, tech_name);

        const uint32_t width = gs_texture_get_width(target);
        const uint32_t height = gs_texture_get_height(target);

        gs_set_render_target(target, NULL);
        set_render_size(width, height);

        size_t passes = gs_technique_begin(tech);
        for (size_t i = 0; i < passes; i++) {
                gs_technique_begin_pass(tech, i);
                gs_draw(GS_TRIS, 0, 3);
                gs_technique_end_pass(tech);
        }
        gs_technique_end(tech);
}

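/* Converts the RGBA output into up to three target planes (e.g. Y/UV or
 * Y/U/V), one conversion pass per plane, using the conversion effect and the
 * current color matrix. */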
static const char *render_convert_texture_name = "render_convert_texture";
static void render_convert_texture(struct obs_core_video *video,
                                   gs_texture_t *texture)
{
        profile_start(render_convert_texture_name);

        gs_effect_t *effect = video->conversion_effect;
        gs_eparam_t *color_vec0 =
                gs_effect_get_param_by_name(effect, "color_vec0");
        gs_eparam_t *color_vec1 =
                gs_effect_get_param_by_name(effect, "color_vec1");
        gs_eparam_t *color_vec2 =
                gs_effect_get_param_by_name(effect, "color_vec2");
        gs_eparam_t *image = gs_effect_get_param_by_name(effect, "image");
        gs_eparam_t *width_i = gs_effect_get_param_by_name(effect, "width_i");

        struct vec4 vec0, vec1, vec2;
        vec4_set(&vec0, video->color_matrix[4], video->color_matrix[5],
                 video->color_matrix[6], video->color_matrix[7]);
        vec4_set(&vec1, video->color_matrix[0], video->color_matrix[1],
                 video->color_matrix[2], video->color_matrix[3]);
        vec4_set(&vec2, video->color_matrix[8], video->color_matrix[9],
                 video->color_matrix[10], video->color_matrix[11]);

        gs_enable_blending(false);

        if (video->convert_textures[0]) {
                gs_effect_set_texture(image, texture);
                gs_effect_set_vec4(color_vec0, &vec0);
                render_convert_plane(effect, video->convert_textures[0],
                                     video->conversion_techs[0]);

                if (video->convert_textures[1]) {
                        gs_effect_set_texture(image, texture);
                        gs_effect_set_vec4(color_vec1, &vec1);
                        if (!video->convert_textures[2])
                                gs_effect_set_vec4(color_vec2, &vec2);
                        gs_effect_set_float(width_i, video->conversion_width_i);
                        render_convert_plane(effect, video->convert_textures[1],
                                             video->conversion_techs[1]);

                        if (video->convert_textures[2]) {
                                gs_effect_set_texture(image, texture);
                                gs_effect_set_vec4(color_vec2, &vec2);
                                gs_effect_set_float(width_i,
                                                    video->conversion_width_i);
                                render_convert_plane(
                                        effect, video->convert_textures[2],
                                        video->conversion_techs[2]);
                        }
                }
        }

        gs_enable_blending(true);

        video->texture_converted = true;

        profile_end(render_convert_texture_name);
}

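/* Copies the current frame's output (or converted) textures into staging
 * surfaces so the CPU can map and download them on a later frame. */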
static const char *stage_output_texture_name = "stage_output_texture";
static inline void stage_output_texture(struct obs_core_video *video,
                                        int cur_texture)
{
        profile_start(stage_output_texture_name);

        unmap_last_surface(video);

        if (!video->gpu_conversion) {
                gs_stagesurf_t *copy = video->copy_surfaces[cur_texture][0];
                if (copy)
                        gs_stage_texture(copy, video->output_texture);

                video->textures_copied[cur_texture] = true;
        } else if (video->texture_converted) {
                for (int i = 0; i < NUM_CHANNELS; i++) {
                        gs_stagesurf_t *copy =
                                video->copy_surfaces[cur_texture][i];
                        if (copy)
                                gs_stage_texture(copy,
                                                 video->convert_textures[i]);
                }

                video->textures_copied[cur_texture] = true;
        }

        profile_end(stage_output_texture_name);
}

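/* Texture-based (GPU) encoder path, Windows only: converted textures are
 * handed to the encoder thread through a queue of shared textures instead of
 * being downloaded to system memory. */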
#ifdef _WIN32
static inline bool queue_frame(struct obs_core_video *video, bool raw_active,
                               struct obs_vframe_info *vframe_info)
{
        bool duplicate =
                !video->gpu_encoder_avail_queue.size ||
                (video->gpu_encoder_queue.size && vframe_info->count > 1);

        if (duplicate) {
                struct obs_tex_frame *tf = circlebuf_data(
                        &video->gpu_encoder_queue,
                        video->gpu_encoder_queue.size - sizeof(*tf));

                /* texture-based encoding is stopping */
                if (!tf) {
                        return false;
                }

                tf->count++;
                os_sem_post(video->gpu_encode_semaphore);
                goto finish;
        }

        struct obs_tex_frame tf;
        circlebuf_pop_front(&video->gpu_encoder_avail_queue, &tf, sizeof(tf));

        if (tf.released) {
                gs_texture_acquire_sync(tf.tex, tf.lock_key, GS_WAIT_INFINITE);
                tf.released = false;
        }

        /* the vframe_info->count > 1 case causing a copy can only happen if by
         * some chance the very first frame has to be duplicated for whatever
         * reason.  otherwise, it goes to the 'duplicate' case above, which
         * will ensure better performance. */
        if (raw_active || vframe_info->count > 1) {
                gs_copy_texture(tf.tex, video->convert_textures[0]);
        } else {
                gs_texture_t *tex = video->convert_textures[0];
                gs_texture_t *tex_uv = video->convert_textures[1];

                video->convert_textures[0] = tf.tex;
                video->convert_textures[1] = tf.tex_uv;

                tf.tex = tex;
                tf.tex_uv = tex_uv;
        }

        tf.count = 1;
        tf.timestamp = vframe_info->timestamp;
        tf.released = true;
        tf.handle = gs_texture_get_shared_handle(tf.tex);
        gs_texture_release_sync(tf.tex, ++tf.lock_key);
        circlebuf_push_back(&video->gpu_encoder_queue, &tf, sizeof(tf));

        os_sem_post(video->gpu_encode_semaphore);

finish:
        return --vframe_info->count;
}

extern void full_stop(struct obs_encoder *encoder);

static inline void encode_gpu(struct obs_core_video *video, bool raw_active,
                              struct obs_vframe_info *vframe_info)
{
        while (queue_frame(video, raw_active, vframe_info))
                ;
}

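/* Pops the next frame's timing info and queues its texture(s) for the GPU
 * encoders. */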
static const char *output_gpu_encoders_name = "output_gpu_encoders";
static void output_gpu_encoders(struct obs_core_video *video, bool raw_active)
{
        profile_start(output_gpu_encoders_name);

        if (!video->texture_converted)
                goto end;
        if (!video->vframe_info_buffer_gpu.size)
                goto end;

        struct obs_vframe_info vframe_info;
        circlebuf_pop_front(&video->vframe_info_buffer_gpu, &vframe_info,
                            sizeof(vframe_info));

        pthread_mutex_lock(&video->gpu_encoder_mutex);
        encode_gpu(video, raw_active, &vframe_info);
        pthread_mutex_unlock(&video->gpu_encoder_mutex);

end:
        profile_end(output_gpu_encoders_name);
}
#endif

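/* Top-level per-frame GPU work: renders the main texture, rescales it to the
 * output size, runs GPU color conversion if enabled, and stages/queues the
 * result for the raw and/or texture-based encoders. */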
static inline void render_video(struct obs_core_video *video, bool raw_active,
                                const bool gpu_active, int cur_texture)
{
        gs_begin_scene();

        gs_enable_depth_test(false);
        gs_set_cull_mode(GS_NEITHER);

        render_main_texture(video);

        if (raw_active || gpu_active) {
                gs_texture_t *texture = render_output_texture(video);

#ifdef _WIN32
                if (gpu_active)
                        gs_flush();
#endif

                if (video->gpu_conversion)
                        render_convert_texture(video, texture);

#ifdef _WIN32
                if (gpu_active) {
                        gs_flush();
                        output_gpu_encoders(video, raw_active);
                }
#endif

                if (raw_active)
                        stage_output_texture(video, cur_texture);
        }

        gs_set_render_target(NULL, NULL);
        gs_enable_blending(true);

        gs_end_scene();
}

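/* Maps the staging surfaces that were copied on a previous frame so their
 * data can be read on the CPU. */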
static inline bool download_frame(struct obs_core_video *video,
                                  int prev_texture, struct video_data *frame)
{
        if (!video->textures_copied[prev_texture])
                return false;

        for (int channel = 0; channel < NUM_CHANNELS; ++channel) {
                gs_stagesurf_t *surface =
                        video->copy_surfaces[prev_texture][channel];
                if (surface) {
                        if (!gs_stagesurface_map(surface, &frame->data[channel],
                                                 &frame->linesize[channel]))
                                return false;

                        video->mapped_surfaces[channel] = surface;
                }
        }
        return true;
}

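/* Copies one plane from a mapped staging surface into the output frame,
 * collapsing to a single memcpy when both line sizes equal the plane width.
 * Returns the advanced input pointer. */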
static const uint8_t *set_gpu_converted_plane(uint32_t width, uint32_t height,
                                              uint32_t linesize_input,
                                              uint32_t linesize_output,
                                              const uint8_t *in, uint8_t *out)
{
        if ((width == linesize_input) && (width == linesize_output)) {
                size_t total = (size_t)width * (size_t)height;
                memcpy(out, in, total);
                in += total;
        } else {
                for (size_t y = 0; y < height; y++) {
                        memcpy(out, in, width);
                        out += linesize_output;
                        in += linesize_input;
                }
        }

        return in;
}

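/* Copies GPU-converted plane data into the output video frame according to
 * the output format (shared NV12 textures, I420, NV12, or I444). */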
static void set_gpu_converted_data(struct obs_core_video *video,
                                   struct video_frame *output,
                                   const struct video_data *input,
                                   const struct video_output_info *info)
{
        if (video->using_nv12_tex) {
                const uint32_t width = info->width;
                const uint32_t height = info->height;

                const uint8_t *const in_uv = set_gpu_converted_plane(
                        width, height, input->linesize[0], output->linesize[0],
                        input->data[0], output->data[0]);

                const uint32_t height_d2 = height / 2;
                set_gpu_converted_plane(width, height_d2, input->linesize[0],
                                        output->linesize[1], in_uv,
                                        output->data[1]);
        } else {
                switch (info->format) {
                case VIDEO_FORMAT_I420: {
                        const uint32_t width = info->width;
                        const uint32_t height = info->height;

                        set_gpu_converted_plane(width, height,
                                                input->linesize[0],
                                                output->linesize[0],
                                                input->data[0],
                                                output->data[0]);

                        const uint32_t width_d2 = width / 2;
                        const uint32_t height_d2 = height / 2;

                        set_gpu_converted_plane(width_d2, height_d2,
                                                input->linesize[1],
                                                output->linesize[1],
                                                input->data[1],
                                                output->data[1]);

                        set_gpu_converted_plane(width_d2, height_d2,
                                                input->linesize[2],
                                                output->linesize[2],
                                                input->data[2],
                                                output->data[2]);

                        break;
                }
                case VIDEO_FORMAT_NV12: {
                        const uint32_t width = info->width;
                        const uint32_t height = info->height;

                        set_gpu_converted_plane(width, height,
                                                input->linesize[0],
                                                output->linesize[0],
                                                input->data[0],
                                                output->data[0]);

                        const uint32_t height_d2 = height / 2;
                        set_gpu_converted_plane(width, height_d2,
                                                input->linesize[1],
                                                output->linesize[1],
                                                input->data[1],
                                                output->data[1]);

                        break;
                }
                case VIDEO_FORMAT_I444: {
                        const uint32_t width = info->width;
                        const uint32_t height = info->height;

                        set_gpu_converted_plane(width, height,
                                                input->linesize[0],
                                                output->linesize[0],
                                                input->data[0],
                                                output->data[0]);

                        set_gpu_converted_plane(width, height,
                                                input->linesize[1],
                                                output->linesize[1],
                                                input->data[1],
                                                output->data[1]);

                        set_gpu_converted_plane(width, height,
                                                input->linesize[2],
                                                output->linesize[2],
                                                input->data[2],
                                                output->data[2]);

                        break;
                }

                case VIDEO_FORMAT_NONE:
                case VIDEO_FORMAT_YVYU:
                case VIDEO_FORMAT_YUY2:
                case VIDEO_FORMAT_UYVY:
                case VIDEO_FORMAT_RGBA:
                case VIDEO_FORMAT_BGRA:
                case VIDEO_FORMAT_BGRX:
                case VIDEO_FORMAT_Y800:
                case VIDEO_FORMAT_BGR3:
                case VIDEO_FORMAT_I422:
                case VIDEO_FORMAT_I40A:
                case VIDEO_FORMAT_I42A:
                case VIDEO_FORMAT_YUVA:
                case VIDEO_FORMAT_AYUV:
                        /* unimplemented */
                        ;
                }
        }
}

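/* Copies a full RGBX frame, falling back to a line-by-line copy when the
 * input and output line sizes differ. */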
static inline void copy_rgbx_frame(struct video_frame *output,
                                   const struct video_data *input,
                                   const struct video_output_info *info)
{
        uint8_t *in_ptr = input->data[0];
        uint8_t *out_ptr = output->data[0];

        /* if the line sizes match, do a single copy */
        if (input->linesize[0] == output->linesize[0]) {
                memcpy(out_ptr, in_ptr,
                       (size_t)input->linesize[0] * (size_t)info->height);
        } else {
                const size_t copy_size = (size_t)info->width * 4;
                for (size_t y = 0; y < info->height; y++) {
                        memcpy(out_ptr, in_ptr, copy_size);
                        in_ptr += input->linesize[0];
                        out_ptr += output->linesize[0];
                }
        }
}

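/* Delivers the downloaded frame data to the raw video output. */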
static inline void output_video_data(struct obs_core_video *video,
                                     struct video_data *input_frame, int count)
{
        const struct video_output_info *info;
        struct video_frame output_frame;
        bool locked;

        info = video_output_get_info(video->video);

        locked = video_output_lock_frame(video->video, &output_frame, count,
                                         input_frame->timestamp);
        if (locked) {
                if (video->gpu_conversion) {
                        set_gpu_converted_data(video, &output_frame,
                                               input_frame, info);
                } else {
                        copy_rgbx_frame(&output_frame, input_frame, info);
                }

                video_output_unlock_frame(video->video);
        }
}
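
/* Sleeps until the next frame boundary.  If the target time was already
 * missed, the number of elapsed intervals is counted so lagged frames can be
 * tracked, and the frame info (timestamp plus duplicate count) is queued for
 * the raw and/or GPU encoder paths. */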
static inline void video_sleep(struct obs_core_video *video, bool raw_active,
                               const bool gpu_active, uint64_t *p_time,
                               uint64_t interval_ns)
{
        struct obs_vframe_info vframe_info;
        uint64_t cur_time = *p_time;
        uint64_t t = cur_time + interval_ns;
        int count;

        if (os_sleepto_ns(t)) {
                *p_time = t;
                count = 1;
        } else {
                count = (int)((os_gettime_ns() - cur_time) / interval_ns);
                *p_time = cur_time + interval_ns * count;
        }

        video->total_frames += count;
        video->lagged_frames += count - 1;

        vframe_info.timestamp = cur_time;
        vframe_info.count = count;

        if (raw_active)
                circlebuf_push_back(&video->vframe_info_buffer, &vframe_info,
                                    sizeof(vframe_info));
        if (gpu_active)
                circlebuf_push_back(&video->vframe_info_buffer_gpu,
                                    &vframe_info, sizeof(vframe_info));
}

static const char *output_frame_gs_context_name = "gs_context(video->graphics)";
static const char *output_frame_render_video_name = "render_video";
static const char *output_frame_download_frame_name = "download_frame";
static const char *output_frame_gs_flush_name = "gs_flush";
static const char *output_frame_output_video_data_name = "output_video_data";
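
/* Renders one frame of video.  When raw output is active, the previously
 * rendered texture (prev_texture) is downloaded and handed to
 * output_video_data() together with its queued timestamp and duplicate
 * count; rendering and readback are staggered across NUM_TEXTURES. */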
static inline void output_frame(bool raw_active, const bool gpu_active)
{
        struct obs_core_video *video = &obs->video;
        int cur_texture = video->cur_texture;
        int prev_texture = cur_texture == 0 ? NUM_TEXTURES - 1
                                            : cur_texture - 1;
        struct video_data frame;
        bool frame_ready = 0;

        memset(&frame, 0, sizeof(struct video_data));

        profile_start(output_frame_gs_context_name);
        gs_enter_context(video->graphics);

        profile_start(output_frame_render_video_name);
        GS_DEBUG_MARKER_BEGIN(GS_DEBUG_COLOR_RENDER_VIDEO,
                              output_frame_render_video_name);
        render_video(video, raw_active, gpu_active, cur_texture);
        GS_DEBUG_MARKER_END();
        profile_end(output_frame_render_video_name);

        if (raw_active) {
                profile_start(output_frame_download_frame_name);
                frame_ready = download_frame(video, prev_texture, &frame);
                profile_end(output_frame_download_frame_name);
        }

        profile_start(output_frame_gs_flush_name);
        gs_flush();
        profile_end(output_frame_gs_flush_name);

        gs_leave_context();
        profile_end(output_frame_gs_context_name);

        if (raw_active && frame_ready) {
                struct obs_vframe_info vframe_info;
                circlebuf_pop_front(&video->vframe_info_buffer, &vframe_info,
                                    sizeof(vframe_info));

                frame.timestamp = vframe_info.timestamp;
                profile_start(output_frame_output_video_data_name);
                output_video_data(video, &frame, vframe_info.count);
                profile_end(output_frame_output_video_data_name);
        }

        if (++video->cur_texture == NUM_TEXTURES)
                video->cur_texture = 0;
}

#define NBSP "\xC2\xA0"
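
/* Helpers that reset per-frame bookkeeping when raw or GPU encoding
 * (re)activates, so stale texture state and queued frame info from a
 * previous session are not carried over. */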
static void clear_base_frame_data(void)
{
        struct obs_core_video *video = &obs->video;
        video->texture_rendered = false;
        video->texture_converted = false;
        circlebuf_free(&video->vframe_info_buffer);
        video->cur_texture = 0;
}

static void clear_raw_frame_data(void)
{
        struct obs_core_video *video = &obs->video;
        memset(video->textures_copied, 0, sizeof(video->textures_copied));
        circlebuf_free(&video->vframe_info_buffer);
}

#ifdef _WIN32
static void clear_gpu_frame_data(void)
{
        struct obs_core_video *video = &obs->video;
        circlebuf_free(&video->vframe_info_buffer_gpu);
}
#endif

extern THREAD_LOCAL bool is_graphics_thread;
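
/* Drains the task queue on the graphics thread: pops one task at a time
 * under the task mutex and runs it, until the queue is empty. */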
static void execute_graphics_tasks(void)
{
        struct obs_core_video *video = &obs->video;
        bool tasks_remaining = true;

        while (tasks_remaining) {
                pthread_mutex_lock(&video->task_mutex);
                if (video->tasks.size) {
                        struct obs_task_info info;
                        circlebuf_pop_front(&video->tasks, &info, sizeof(info));
                        info.task(info.param);
                }
                tasks_remaining = !!video->tasks.size;
                pthread_mutex_unlock(&video->task_mutex);
        }
}
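
/* Windows-only: dynamically loads the libobs-winrt module and resolves its
 * exports so WinRT-based capture can be started on the graphics thread.  If
 * the module is missing or an export cannot be resolved, loading fails and
 * the WinRT calls below are simply skipped. */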
#ifdef _WIN32

struct winrt_exports {
        void (*winrt_initialize)();
        void (*winrt_uninitialize)();
        struct winrt_disaptcher *(*winrt_dispatcher_init)();
        void (*winrt_dispatcher_free)(struct winrt_disaptcher *dispatcher);
        void (*winrt_capture_thread_start)();
        void (*winrt_capture_thread_stop)();
};

#define WINRT_IMPORT(func)                                        \
        do {                                                      \
                exports->func = os_dlsym(module, #func);          \
                if (!exports->func) {                             \
                        success = false;                          \
                        blog(LOG_ERROR,                           \
                             "Could not load function '%s' from " \
                             "module '%s'",                       \
                             #func, module_name);                 \
                }                                                 \
        } while (false)

static bool load_winrt_imports(struct winrt_exports *exports, void *module,
                               const char *module_name)
{
        bool success = true;

        WINRT_IMPORT(winrt_initialize);
        WINRT_IMPORT(winrt_uninitialize);
        WINRT_IMPORT(winrt_dispatcher_init);
        WINRT_IMPORT(winrt_dispatcher_free);
        WINRT_IMPORT(winrt_capture_thread_start);
        WINRT_IMPORT(winrt_capture_thread_stop);

        return success;
}

struct winrt_state {
        bool loaded;
        void *winrt_module;
        struct winrt_exports exports;
        struct winrt_disaptcher *dispatcher;
};

static void init_winrt_state(struct winrt_state *winrt)
{
        static const char *const module_name = "libobs-winrt";

        winrt->winrt_module = os_dlopen(module_name);
        winrt->loaded = winrt->winrt_module &&
                        load_winrt_imports(&winrt->exports, winrt->winrt_module,
                                           module_name);
        winrt->dispatcher = NULL;
        if (winrt->loaded) {
                winrt->exports.winrt_initialize();
                winrt->dispatcher = winrt->exports.winrt_dispatcher_init();

                gs_enter_context(obs->video.graphics);
                winrt->exports.winrt_capture_thread_start();
                gs_leave_context();
        }
}

static void uninit_winrt_state(struct winrt_state *winrt)
{
        if (winrt->winrt_module) {
                if (winrt->loaded) {
                        winrt->exports.winrt_capture_thread_stop();
                        if (winrt->dispatcher)
                                winrt->exports.winrt_dispatcher_free(
                                        winrt->dispatcher);
                        winrt->exports.winrt_uninitialize();
                }

                os_dlclose(winrt->winrt_module);
        }
}

#endif // #ifdef _WIN32

static const char *tick_sources_name = "tick_sources";
static const char *render_displays_name = "render_displays";
static const char *output_frame_name = "output_frame";
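
/* One iteration of the graphics thread: tick sources, run queued graphics
 * tasks, render and output the frame, render displays, then sleep until the
 * next frame interval while accumulating FPS / frame-time statistics.
 * Returns false once the video output has been told to stop. */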
bool obs_graphics_thread_loop(struct obs_graphics_context *context)
{
        /* defer loop break to clean up sources */
        const bool stop_requested = video_output_stopped(obs->video.video);

        uint64_t frame_start = os_gettime_ns();
        uint64_t frame_time_ns;
        bool raw_active = obs->video.raw_active > 0;
#ifdef _WIN32
        const bool gpu_active = obs->video.gpu_encoder_active > 0;
        const bool active = raw_active || gpu_active;
#else
        const bool gpu_active = 0;
        const bool active = raw_active;
#endif

        if (!context->was_active && active)
                clear_base_frame_data();
        if (!context->raw_was_active && raw_active)
                clear_raw_frame_data();
#ifdef _WIN32
        if (!context->gpu_was_active && gpu_active)
                clear_gpu_frame_data();

        context->gpu_was_active = gpu_active;
#endif
        context->raw_was_active = raw_active;
        context->was_active = active;

        profile_start(context->video_thread_name);

        gs_enter_context(obs->video.graphics);
        gs_begin_frame();
        gs_leave_context();

        profile_start(tick_sources_name);
        context->last_time =
                tick_sources(obs->video.video_time, context->last_time);
        profile_end(tick_sources_name);

        execute_graphics_tasks();

#ifdef _WIN32
        MSG msg;
        while (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) {
                TranslateMessage(&msg);
                DispatchMessage(&msg);
        }
#endif

        profile_start(output_frame_name);
        output_frame(raw_active, gpu_active);
        profile_end(output_frame_name);

        profile_start(render_displays_name);
        render_displays();
        profile_end(render_displays_name);

        frame_time_ns = os_gettime_ns() - frame_start;

        profile_end(context->video_thread_name);

        profile_reenable_thread();

        video_sleep(&obs->video, raw_active, gpu_active, &obs->video.video_time,
                    context->interval);

        context->frame_time_total_ns += frame_time_ns;
        context->fps_total_ns += (obs->video.video_time - context->last_time);
        context->fps_total_frames++;

        if (context->fps_total_ns >= 1000000000ULL) {
                obs->video.video_fps =
                        (double)context->fps_total_frames /
                        ((double)context->fps_total_ns / 1000000000.0);
                obs->video.video_avg_frame_time_ns =
                        context->frame_time_total_ns /
                        (uint64_t)context->fps_total_frames;

                context->frame_time_total_ns = 0;
                context->fps_total_ns = 0;
                context->fps_total_frames = 0;
        }

        return !stop_requested;
}
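
/* Graphics thread entry point: sets up the WinRT state (Windows), the
 * profiler root, and the frame interval, then runs
 * obs_graphics_thread_loop() until it requests a stop. */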
void *obs_graphics_thread(void *param)
{
#ifdef _WIN32
        struct winrt_state winrt;
        init_winrt_state(&winrt);
#endif // #ifdef _WIN32

        is_graphics_thread = true;

        const uint64_t interval = video_output_get_frame_time(obs->video.video);

        obs->video.video_time = os_gettime_ns();
        obs->video.video_frame_interval_ns = interval;

        os_set_thread_name("libobs: graphics thread");

        const char *video_thread_name = profile_store_name(
                obs_get_profiler_name_store(),
                "obs_graphics_thread(%g" NBSP "ms)", interval / 1000000.);
        profile_register_root(video_thread_name, interval);

        srand((unsigned int)time(NULL));

        struct obs_graphics_context context;
        context.interval = video_output_get_frame_time(obs->video.video);
        context.frame_time_total_ns = 0;
        context.fps_total_ns = 0;
        context.fps_total_frames = 0;
        context.last_time = 0;
#ifdef _WIN32
        context.gpu_was_active = false;
#endif
        context.raw_was_active = false;
        context.was_active = false;
        context.video_thread_name = video_thread_name;

#ifdef __APPLE__
        while (obs_graphics_thread_loop_autorelease(&context))
#else
        while (obs_graphics_thread_loop(&context))
#endif
                ;

#ifdef _WIN32
        uninit_winrt_state(&winrt);
#endif

        UNUSED_PARAMETER(param);
        return NULL;
}