Fill out the rest of the source video frame functions, add an NV12 decompression function, and clean up the design of the source video frame handling
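For context on the data layout involved: NV12 stores a full-resolution luma plane followed by a half-resolution interleaved UV plane, and the new decompress_nv12() expands that into a packed 32-bits-per-pixel layout (luma in the low byte, the shared chroma pair in the next two bytes) that the default shader samples. The sketch below is a hypothetical standalone caller, not part of this commit; the buffer names and the assumption that the destination uses 4 bytes per pixel (row_bytes = width * 4) are illustrative, mirroring how upload_frame() calls the function with a mapped texture.

/* Hypothetical caller of the new decompress_nv12() (link against libobs);
 * buffer names and the 4-bytes-per-pixel output assumption are illustrative. */
#include <stdint.h>
#include <stdlib.h>

#include "media-io/format-conversion.h"

int main(void)
{
	const uint32_t width  = 1280;
	const uint32_t height = 720;

	/* NV12: full-size Y plane followed by a half-size interleaved UV plane */
	uint8_t *nv12 = malloc((size_t)width * height * 3 / 2);

	/* destination is packed 4 bytes per pixel; row_bytes would normally
	 * come from texture_map() on the output texture */
	uint32_t row_bytes = width * 4;
	uint8_t *packed = malloc((size_t)row_bytes * height);

	/* ... fill nv12 with a real frame ... */

	decompress_nv12(nv12, width, height, row_bytes, 0, height, packed);

	free(packed);
	free(nv12);
	return 0;
}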
@@ -214,6 +214,42 @@ void decompress_420(const void *input_v, uint32_t width, uint32_t height,
 	}
 }
 
+void decompress_nv12(const void *input_v, uint32_t width, uint32_t height,
+		uint32_t row_bytes, uint32_t start_y, uint32_t end_y,
+		void *output_v)
+{
+	uint8_t *output = output_v;
+	const uint8_t *input = input_v;
+	const uint8_t *input2 = input + width * height;
+
+	uint32_t start_y_d2 = start_y/2;
+	uint32_t width_d2 = width/2;
+	uint32_t height_d2 = end_y/2;
+	uint32_t y;
+
+	for (y = start_y_d2; y < height_d2; y++) {
+		const uint16_t *chroma = (uint16_t*)(input2 + y * width);
+		register const uint8_t *lum0, *lum1;
+		register uint32_t *output0, *output1;
+		uint32_t x;
+
+		lum0 = input + y * 2*width;
+		lum1 = lum0 + width;
+		output0 = (uint32_t*)(output + y * 2*row_bytes);
+		output1 = (uint32_t*)((uint8_t*)output0 + row_bytes);
+
+		for (x = 0; x < width_d2; x++) {
+			uint32_t out = *(chroma++) << 8;
+
+			*(output0++) = *(lum0++) | out;
+			*(output0++) = *(lum0++) | out;
+
+			*(output1++) = *(lum1++) | out;
+			*(output1++) = *(lum1++) | out;
+		}
+	}
+}
+
 void decompress_422(const void *input_v, uint32_t width, uint32_t height,
 		uint32_t row_bytes, uint32_t start_y, uint32_t end_y,
 		void *output_v, bool leading_lum)
@@ -221,8 +257,8 @@ void decompress_422(const void *input_v, uint32_t width, uint32_t height,
 	const uint8_t *input = input_v;
 	uint8_t *output = output_v;
 
-	uint32_t width_d2 = width>>1;
-	uint32_t line_size = width*2;
+	uint32_t width_d2 = width >> 1;
+	uint32_t line_size = width * 2;
 	uint32_t y;
 
 	register const uint32_t *input32;
@@ -230,7 +266,7 @@ void decompress_422(const void *input_v, uint32_t width, uint32_t height,
 	register uint32_t *output32;
 
 	if (leading_lum) {
-		for (y = 0; y < height; y++) {
+		for (y = start_y; y < end_y; y++) {
 			input32 = (uint32_t*)(input + y*line_size);
 			input32_end = input32 + width_d2;
 			output32 = (uint32_t*)(output + y*row_bytes);
@@ -248,7 +284,7 @@ void decompress_422(const void *input_v, uint32_t width, uint32_t height,
 			}
 		}
 	} else {
-		for (y = 0; y < height; y++) {
+		for (y = start_y; y < end_y; y++) {
 			input32 = (uint32_t*)(input + y*line_size);
 			input32_end = input32 + width_d2;
 			output32 = (uint32_t*)(output + y*row_bytes);
@@ -31,6 +31,10 @@ EXPORT void compress_uyvx_to_nv12(const void *input,
 		uint32_t width, uint32_t height, uint32_t row_bytes,
 		uint32_t start_y, uint32_t end_y, void **output);
 
+EXPORT void decompress_nv12(const void *input,
+		uint32_t width, uint32_t height, uint32_t row_bytes,
+		uint32_t start_y, uint32_t end_y, void *output);
+
 EXPORT void decompress_420(const void *input,
 		uint32_t width, uint32_t height, uint32_t row_bytes,
 		uint32_t start_y, uint32_t end_y, void *output);
@@ -36,8 +36,8 @@ enum video_type {
 	VIDEO_FORMAT_UNKNOWN,
 
 	/* planar 420 format */
-	VIDEO_FORMAT_I420, /* planar 4:2:0 */
-	VIDEO_FORMAT_NV12, /* two-plane lum and packed chroma */
+	VIDEO_FORMAT_I420, /* three-plane */
+	VIDEO_FORMAT_NV12, /* two-plane, lum and packed chroma */
 
 	/* packed 422 formats */
 	VIDEO_FORMAT_YVYU,
@@ -45,7 +45,8 @@ enum video_type {
 	VIDEO_FORMAT_UYVY,
 
 	/* packed uncompressed formats */
-	VIDEO_FORMAT_UYVX, /* packed UYV */
+	VIDEO_FORMAT_YUVX,
+	VIDEO_FORMAT_UYVX,
 	VIDEO_FORMAT_RGBA,
 	VIDEO_FORMAT_BGRA,
 	VIDEO_FORMAT_BGRX,
@@ -15,6 +15,7 @@
 along with this program. If not, see <http://www.gnu.org/licenses/>.
 ******************************************************************************/
 
+#include "media-io/format-conversion.h"
 #include "util/platform.h"
 
 #include "obs.h"
@@ -297,20 +298,111 @@ static bool set_texture_size(obs_source_t source, struct source_frame *frame)
 	return source->output_texture != NULL;
 }
 
+enum convert_type {
+	CONVERT_NONE,
+	CONVERT_NV12,
+	CONVERT_420,
+	CONVERT_422_U,
+	CONVERT_422_Y,
+};
+
+static inline enum convert_type get_convert_type(enum video_type type)
+{
+	switch (type) {
+	case VIDEO_FORMAT_I420:
+		return CONVERT_420;
+	case VIDEO_FORMAT_NV12:
+		return CONVERT_NV12;
+
+	case VIDEO_FORMAT_YVYU:
+	case VIDEO_FORMAT_YUY2:
+		return CONVERT_422_Y;
+	case VIDEO_FORMAT_UYVY:
+		return CONVERT_422_U;
+
+	case VIDEO_FORMAT_UNKNOWN:
+	case VIDEO_FORMAT_YUVX:
+	case VIDEO_FORMAT_UYVX:
+	case VIDEO_FORMAT_RGBA:
+	case VIDEO_FORMAT_BGRA:
+	case VIDEO_FORMAT_BGRX:
+		return CONVERT_NONE;
+	}
+
+	return CONVERT_NONE;
+}
+
+static inline bool is_yuv(enum video_type type)
+{
+	switch (type) {
+	case VIDEO_FORMAT_I420:
+	case VIDEO_FORMAT_NV12:
+	case VIDEO_FORMAT_YVYU:
+	case VIDEO_FORMAT_YUY2:
+	case VIDEO_FORMAT_UYVY:
+	case VIDEO_FORMAT_YUVX:
+	case VIDEO_FORMAT_UYVX:
+		return true;
+	case VIDEO_FORMAT_UNKNOWN:
+	case VIDEO_FORMAT_RGBA:
+	case VIDEO_FORMAT_BGRA:
+	case VIDEO_FORMAT_BGRX:
+		return false;
+	}
+
+	return false;
+}
+
+static bool upload_frame(texture_t tex, const struct source_frame *frame)
+{
+	void *ptr;
+	uint32_t row_bytes;
+	enum convert_type type = get_convert_type(frame->type);
+
+	if (type == CONVERT_NONE) {
+		texture_setimage(tex, frame->data, frame->row_bytes, false);
+		return true;
+	}
+
+	if (!texture_map(tex, &ptr, &row_bytes))
+		return false;
+
+	if (type == CONVERT_420)
+		decompress_420(frame->data, frame->width, frame->height,
+				frame->row_bytes, 0, frame->height, ptr);
+
+	else if (type == CONVERT_NV12)
+		decompress_nv12(frame->data, frame->width, frame->height,
+				frame->row_bytes, 0, frame->height, ptr);
+
+	else if (type == CONVERT_422_Y)
+		decompress_422(frame->data, frame->width, frame->height,
+				frame->row_bytes, 0, frame->height, ptr, true);
+
+	else if (type == CONVERT_422_U)
+		decompress_422(frame->data, frame->width, frame->height,
+				frame->row_bytes, 0, frame->height, ptr, false);
+
+	texture_unmap(tex);
+	return true;
+}
+
 static void obs_source_draw_texture(texture_t tex, struct source_frame *frame)
 {
 	effect_t effect = obs->default_effect;
-	const char *type = frame->yuv ? "DrawYUVToRGB" : "DrawRGB";
+	bool yuv = is_yuv(frame->type);
+	const char *type = yuv ? "DrawYUVToRGB" : "DrawRGB";
 	technique_t tech;
-	eparam_t param;
+	eparam_t param;
 
-	texture_setimage(tex, frame->data, frame->row_bytes, frame->flip);
+	if (!upload_frame(tex, frame))
+		return;
 
 	tech = effect_gettechnique(effect, type);
 	technique_begin(tech);
 	technique_beginpass(tech, 0);
 
-	if (frame->yuv) {
+	if (yuv) {
 		param = effect_getparambyname(effect, "yuv_matrix");
 		effect_setval(effect, param, frame->yuv_matrix,
 				sizeof(float) * 16);
@@ -335,8 +427,10 @@ static void obs_source_render_async_video(obs_source_t source)
 	if (!source->timing_set && source->audio_buffer.num)
 		obs_source_flush_audio_buffer(source);
 
-	if (set_texture_size(source, frame))
+	if (set_texture_size(source, frame)) {
+		source->flip = frame->flip;
 		obs_source_draw_texture(source->output_texture, frame);
+	}
 
 	obs_source_releaseframe(source, frame);
 }
@@ -496,8 +590,8 @@ void obs_source_save_settings(obs_source_t source, const char *settings)
 	dstr_copy(&source->settings, settings);
 }
 
-static inline struct filter_frame *filter_async_video(obs_source_t source,
-		struct filter_frame *in)
+static inline struct source_frame *filter_async_video(obs_source_t source,
+		struct source_frame *in)
 {
 	size_t i;
 	for (i = source->filters.num; i > 0; i--) {
@@ -512,27 +606,31 @@ static inline struct filter_frame *filter_async_video(obs_source_t source,
 	return in;
 }
 
-static struct filter_frame *process_video(obs_source_t source,
-		const struct source_video *frame)
+static inline struct source_frame *cache_video(obs_source_t source,
+		const struct source_frame *frame)
 {
-	/* TODO: convert to UYV444 or RGB */
-	return NULL;
+	/* TODO: use an actual cache */
+	struct source_frame *new_frame = bmalloc(sizeof(struct source_frame));
+	memcpy(new_frame, frame, sizeof(struct source_frame));
+	new_frame->data = bmalloc(frame->row_bytes * frame->height);
+
+	return new_frame;
 }
 
 void obs_source_output_video(obs_source_t source,
-		const struct source_video *frame)
+		const struct source_frame *frame)
 {
-	struct filter_frame *output;
-
-	output = process_video(source, frame);
+	struct source_frame *output = cache_video(source, frame);
 
 	pthread_mutex_lock(&source->filter_mutex);
 	output = filter_async_video(source, output);
 	pthread_mutex_unlock(&source->filter_mutex);
 
-	pthread_mutex_lock(&source->video_mutex);
-	da_push_back(source->video_frames, &output);
-	pthread_mutex_unlock(&source->video_mutex);
+	if (output) {
+		pthread_mutex_lock(&source->video_mutex);
+		da_push_back(source->video_frames, &output);
+		pthread_mutex_unlock(&source->video_mutex);
+	}
 }
 
 static inline const struct audio_data *filter_async_audio(obs_source_t source,
@@ -595,13 +693,63 @@ void obs_source_output_audio(obs_source_t source,
 	pthread_mutex_unlock(&source->filter_mutex);
 }
 
+/*
+ * Ensures that cached frames are displayed on time. If multiple frames
+ * were cached between renders, then releases the unnecessary frames and uses
+ * the frame with the closest timing.
+ */
 struct source_frame *obs_source_getframe(obs_source_t source)
 {
-	/* TODO */
-	return NULL;
+	uint64_t last_frame_time = source->last_frame_timestamp;
+	struct source_frame *frame = NULL;
+	struct source_frame *next_frame;
+	uint64_t sys_time, frame_time;
+
+	pthread_mutex_lock(&source->video_mutex);
+
+	if (!source->video_frames.num)
+		goto unlock;
+
+	next_frame = source->video_frames.array[0];
+	sys_time = os_gettime_ns();
+	frame_time = next_frame->timestamp;
+
+	if (!source->last_frame_timestamp) {
+		frame = next_frame;
+		da_erase(source->video_frames, 0);
+
+		source->last_frame_timestamp = frame_time;
+	} else {
+		uint64_t sys_offset, frame_offset;
+		sys_offset = sys_time - source->last_sys_timestamp;
+		frame_offset = frame_time - last_frame_time;
+
+		source->last_frame_timestamp += sys_offset;
+
+		while (frame_offset <= sys_offset) {
+			if (frame)
+				source_frame_destroy(frame);
+
+			frame = next_frame;
+			da_erase(source->video_frames, 0);
+
+			if (!source->video_frames.num)
+				break;
+
+			next_frame = source->video_frames.array[0];
+			frame_time = next_frame->timestamp;
+			frame_offset = frame_time - last_frame_time;
+		}
+	}
+
+	source->last_sys_timestamp = sys_time;
+
+unlock:
+	pthread_mutex_unlock(&source->video_mutex);
+	return frame;
 }
 
 void obs_source_releaseframe(obs_source_t source, struct source_frame *frame)
 {
-	/* TODO */
+	source_frame_destroy(frame);
 }
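As a quick illustration of the timing logic in obs_source_getframe() above (made-up numbers, not part of the commit): if 50 ms of system time has elapsed since the last render and the cached frames are roughly 16.7 ms apart, the loop consumes the first two frames, releases the older one, and displays the last frame whose offset does not exceed the elapsed time.

/* Illustrative-only sketch of the frame selection above, with made-up
 * timestamps: drop cached frames whose offset from the last displayed frame
 * is already covered by the elapsed system time. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* cached frame timestamps in ns, roughly 16.7 ms apart */
	uint64_t frames[] = {16700000, 33400000, 50100000, 66800000};
	size_t num = sizeof(frames) / sizeof(frames[0]);

	uint64_t last_frame_time = 0;   /* timestamp of last shown frame */
	uint64_t sys_offset = 50000000; /* 50 ms since the last render */

	size_t i = 0;
	while (i < num && frames[i] - last_frame_time <= sys_offset)
		i++;                    /* frames before index i-1 are released */

	/* with these numbers i == 2, so the frame at 33400000 ns is shown */
	printf("frame shown: index %zu (%llu ns)\n",
			i - 1, (unsigned long long)frames[i - 1]);
	return 0;
}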
@@ -136,8 +136,8 @@
 * Return value: true if sources remaining, otherwise false.
 *
 * ---------------------------------------------------------
- * struct filter_frame *[name]_filter_video(void *data,
- *		struct filter_frame *frame);
+ * struct source_frame *[name]_filter_video(void *data,
+ *		const struct source_frame *frame);
 * Filters audio data. Used with audio filters.
 *
 * frame: Video frame data.
@@ -185,8 +185,8 @@ struct source_info {
 
 	bool (*enum_children)(void *data, size_t idx, obs_source_t *child);
 
-	struct filter_frame *(*filter_video)(void *data,
-			struct filter_frame *frame);
+	struct source_frame *(*filter_video)(void *data,
+			const struct source_frame *frame);
 	const struct audio_data *(*filter_audio)(void *data,
 			const struct audio_data *audio);
 };
@@ -211,7 +211,10 @@ struct obs_source {
 	/* async video and audio */
 	bool timing_set;
 	uint64_t timing_adjust;
+	uint64_t last_frame_timestamp;
+	uint64_t last_sys_timestamp;
 	texture_t output_texture;
+	bool flip;
 
 	audio_line_t audio_line;
 	DARRAY(struct audiobuf) audio_buffer;

libobs/obs.h
@@ -61,19 +61,6 @@ struct source_audio {
 	uint64_t timestamp;
 };
 
-struct source_video {
-	const void *data;
-	uint32_t width;
-	uint32_t height;
-	uint32_t row_bytes;
-	uint64_t timestamp;
-
-	enum video_type type;
-	float yuv_matrix[16];
-	bool flip;
-};
-
-/* differs from source_video in that it's YUV444 or RGB only */
 struct source_frame {
 	void *data;
 	uint32_t width;
@@ -81,7 +68,7 @@ struct source_frame {
 	uint32_t row_bytes;
 	uint64_t timestamp;
 
-	bool yuv;
+	enum video_type type;
 	float yuv_matrix[16];
 	bool flip;
 };
@@ -291,7 +278,7 @@ EXPORT void obs_source_save_settings(obs_source_t source, const char *settings);
 
 /** Outputs asynchronous video data */
 EXPORT void obs_source_obs_async_video(obs_source_t source,
-		const struct video_frame *frame);
+		const struct source_frame *frame);
 
 /** Outputs audio data (always asynchronous) */
 EXPORT void obs_source_obs_async_audio(obs_source_t source,