Remove majority of warnings
There were a *lot* of warnings; most of them have now been removed. Also, the warning flags are now placed before C_FLAGS and CXX_FLAGS rather than after, because -Wall -Wextra was overriding flags that came before it.
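Most of the C-side fixes below follow the same two-part pattern: unused arguments of stub functions are explicitly voided, and silent /* TODO */ markers gain a compile-time reminder. A distilled sketch of that pattern (the example function is illustrative, not part of the commit; UNUSED_PARAMETER is the macro this commit adds to c99defs.h):

#define UNUSED_PARAMETER(param) (void)param

int not_implemented_yet(int some_arg)
{
	/* TODO */
	#pragma message ("TODO: implement not_implemented_yet")
	UNUSED_PARAMETER(some_arg); /* silences -Wall/-Wextra unused-parameter warnings */
	return 0;
}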
@@ -96,6 +96,7 @@ set(libobs_mediaio_SOURCES
 	media-io/format-conversion.c
 	media-io/audio-io.c)
 set(libobs_mediaio_HEADERS
+	media-io/media-io-defs.h
 	media-io/format-conversion.h
 	media-io/video-io.h
 	media-io/audio-resampler.h
@@ -186,9 +186,9 @@ static inline bool calldata_getsize (calldata_t data, const char *name,
 }

 static inline bool calldata_getptr (calldata_t data, const char *name,
-		void **ptr)
+		void *p_ptr)
 {
-	return calldata_getdata(data, name, ptr, sizeof(*ptr));
+	return calldata_getdata(data, name, p_ptr, sizeof(p_ptr));
 }

 EXPORT bool calldata_getstring(calldata_t data, const char *name,
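calldata_getptr now takes the address of the caller's pointer as a plain void * and copies sizeof(void*) bytes into it, presumably to avoid the incompatible-pointer-type warnings the old void ** form produced when callers passed the address of a typed pointer. A minimal usage sketch (not from the commit; the include path and the parameter name are assumptions):

#include "callback/calldata.h"

static void *get_ptr_param(calldata_t data, const char *name)
{
	void *ptr = NULL;

	/* the address of any pointer variable can now be passed directly */
	calldata_getptr(data, name, &ptr);
	return ptr;
}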
@@ -63,8 +63,7 @@ struct gs_exports {
 	indexbuffer_t (*device_create_indexbuffer)(device_t device,
 			enum gs_index_type type, void *indices, size_t num,
 			uint32_t flags);
-	enum gs_texture_type (*device_gettexturetype)(device_t device,
-			texture_t texture);
+	enum gs_texture_type (*device_gettexturetype)(texture_t texture);
 	void (*device_load_vertexbuffer)(device_t device,
 			vertbuffer_t vertbuffer);
 	void (*device_load_indexbuffer)(device_t device,
@@ -157,8 +156,8 @@ struct gs_exports {
 	uint32_t (*stagesurface_getheight)(stagesurf_t stagesurf);
 	enum gs_color_format (*stagesurface_getcolorformat)(
 			stagesurf_t stagesurf);
-	bool (*stagesurface_map)(stagesurf_t stagesurf, const void **data,
-			uint32_t *linesize);
+	bool (*stagesurface_map)(stagesurf_t stagesurf,
+			const uint8_t **data, uint32_t *linesize);
 	void (*stagesurface_unmap)(stagesurf_t stagesurf);

 	void (*zstencil_destroy)(zstencil_t zstencil);
@@ -527,6 +527,8 @@ void gs_normal3v(const struct vec3 *v)
 void gs_color4v(const struct vec4 *v)
 {
 	/* TODO */
+	#pragma message ("TODO: implement gs_color4v")
+	UNUSED_PARAMETER(v);
 }

 void gs_texcoord2v(const struct vec2 *v, int unit)
@@ -542,6 +544,7 @@ void gs_texcoord2v(const struct vec2 *v, int unit)
 input_t gs_getinput(void)
 {
 	/* TODO */
+	#pragma message ("TODO: implement gs_getinput (hmm, not sure about input yet)")
 	return NULL;
 }

@@ -628,18 +631,27 @@ shader_t gs_create_pixelshader_from_file(const char *file, char **error_string)
 texture_t gs_create_texture_from_file(const char *file, uint32_t flags)
 {
 	/* TODO */
+	#pragma message ("TODO: implement gs_create_texture_from_file")
+	UNUSED_PARAMETER(file);
+	UNUSED_PARAMETER(flags);
 	return NULL;
 }

 texture_t gs_create_cubetexture_from_file(const char *file, uint32_t flags)
 {
 	/* TODO */
+	#pragma message ("TODO: implement gs_create_cubetexture_from_file")
+	UNUSED_PARAMETER(file);
+	UNUSED_PARAMETER(flags);
 	return NULL;
 }

 texture_t gs_create_volumetexture_from_file(const char *file, uint32_t flags)
 {
 	/* TODO */
+	#pragma message ("TODO: implement gs_create_volumetexture_from_file")
+	UNUSED_PARAMETER(file);
+	UNUSED_PARAMETER(flags);
 	return NULL;
 }

@@ -667,8 +679,7 @@ static inline void assign_sprite_uv(float *start, float *end, bool flip)
 }

 static void build_sprite(struct vb_data *data, float fcx, float fcy,
-		float start_u, float end_u, float start_v, float end_v,
-		uint32_t flip)
+		float start_u, float end_u, float start_v, float end_v)
 {
 	struct vec2 *tvarray = data->tvarray[0].array;

@@ -690,7 +701,7 @@ static inline void build_sprite_norm(struct vb_data *data, float fcx, float fcy,

 	assign_sprite_uv(&start_u, &end_u, (flip & GS_FLIP_U) != 0);
 	assign_sprite_uv(&start_v, &end_v, (flip & GS_FLIP_V) != 0);
-	build_sprite(data, fcx, fcy, start_u, end_u, start_v, end_v, flip);
+	build_sprite(data, fcx, fcy, start_u, end_u, start_v, end_v);
 }

 static inline void build_sprite_rect(struct vb_data *data, texture_t tex,
@@ -703,7 +714,7 @@ static inline void build_sprite_rect(struct vb_data *data, texture_t tex,

 	assign_sprite_rect(&start_u, &end_u, width, (flip & GS_FLIP_U) != 0);
 	assign_sprite_rect(&start_v, &end_v, height, (flip & GS_FLIP_V) != 0);
-	build_sprite(data, fcx, fcy, start_u, end_u, start_v, end_v, flip);
+	build_sprite(data, fcx, fcy, start_u, end_u, start_v, end_v);
 }

 void gs_draw_sprite(texture_t tex, uint32_t flip, uint32_t width,
@@ -740,6 +751,14 @@ void gs_draw_cube_backdrop(texture_t cubetex, const struct quat *rot,
 		float left, float right, float top, float bottom, float znear)
 {
 	/* TODO */
+	#pragma message ("TODO: implement gs_draw_cube_backdrop")
+	UNUSED_PARAMETER(cubetex);
+	UNUSED_PARAMETER(rot);
+	UNUSED_PARAMETER(left);
+	UNUSED_PARAMETER(right);
+	UNUSED_PARAMETER(top);
+	UNUSED_PARAMETER(bottom);
+	UNUSED_PARAMETER(znear);
 }

 void gs_resetviewport(void)
@@ -760,6 +779,10 @@ void gs_set2dmode(void)
 void gs_set3dmode(double fovy, double znear, double zvar)
 {
 	/* TODO */
+	#pragma message ("TODO: implement gs_set3dmode")
+	UNUSED_PARAMETER(fovy);
+	UNUSED_PARAMETER(znear);
+	UNUSED_PARAMETER(zvar);
 }

 void gs_viewport_push(void)
@@ -817,6 +840,12 @@ void cubetexture_setimage(texture_t cubetex, uint32_t side, const void *data,
 		uint32_t linesize, bool invert)
 {
 	/* TODO */
+	#pragma message ("TODO: implement cubetexture_setimage")
+	UNUSED_PARAMETER(cubetex);
+	UNUSED_PARAMETER(side);
+	UNUSED_PARAMETER(data);
+	UNUSED_PARAMETER(linesize);
+	UNUSED_PARAMETER(invert);
 }

 void gs_perspective(float angle, float aspect, float near, float far)
@@ -999,8 +1028,7 @@ indexbuffer_t gs_create_indexbuffer(enum gs_index_type type,
 enum gs_texture_type gs_gettexturetype(texture_t texture)
 {
 	graphics_t graphics = thread_graphics;
-	return graphics->exports.device_gettexturetype(graphics->device,
-			texture);
+	return graphics->exports.device_gettexturetype(texture);
 }

 void gs_load_vertexbuffer(vertbuffer_t vertbuffer)
@@ -1526,7 +1554,7 @@ enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf)
 	return graphics->exports.stagesurface_getcolorformat(stagesurf);
 }

-bool stagesurface_map(stagesurf_t stagesurf, const void **data,
+bool stagesurface_map(stagesurf_t stagesurf, const uint8_t **data,
 		uint32_t *linesize)
 {
 	graphics_t graphics = thread_graphics;
@@ -1610,5 +1638,3 @@ bool texture_rebind_iosurface(texture_t texture, void *iosurf)

 	return graphics->exports.texture_rebind_iosurface(texture, iosurf);
 }
-
-
@@ -659,7 +659,7 @@ EXPORT void stagesurface_destroy(stagesurf_t stagesurf);
 EXPORT uint32_t stagesurface_getwidth(stagesurf_t stagesurf);
 EXPORT uint32_t stagesurface_getheight(stagesurf_t stagesurf);
 EXPORT enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf);
-EXPORT bool stagesurface_map(stagesurf_t stagesurf, const void **data,
+EXPORT bool stagesurface_map(stagesurf_t stagesurf, const uint8_t **data,
 		uint32_t *linesize);
 EXPORT void stagesurface_unmap(stagesurf_t stagesurf);

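With the map call yielding const uint8_t * instead of const void *, callers can index into the mapped data using the returned linesize without casting. A hedged sketch of reading back a staging surface row by row (not from the commit; only the stagesurface_* calls above are real, the destination buffer handling is illustrative):

#include <string.h>

static void copy_stage_rows(stagesurf_t stagesurf, uint8_t *dst,
		uint32_t dst_linesize)
{
	const uint8_t *data;
	uint32_t linesize;
	uint32_t height = stagesurface_getheight(stagesurf);

	if (!stagesurface_map(stagesurf, &data, &linesize))
		return;

	/* copy the smaller of the two row sizes for each line */
	for (uint32_t y = 0; y < height; y++)
		memcpy(dst + y * dst_linesize, data + y * linesize,
				dst_linesize < linesize ? dst_linesize : linesize);

	stagesurface_unmap(stagesurf);
}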
@@ -39,9 +39,9 @@ struct audio_line {
 	char *name;

 	struct audio_output *audio;
-	struct circlebuf buffers[MAX_AUDIO_PLANES];
+	struct circlebuf buffers[MAX_AV_PLANES];
 	pthread_mutex_t mutex;
-	DARRAY(uint8_t) volume_buffers[MAX_AUDIO_PLANES];
+	DARRAY(uint8_t) volume_buffers[MAX_AV_PLANES];
 	uint64_t base_timestamp;
 	uint64_t last_timestamp;
@@ -55,7 +55,7 @@ struct audio_line {

 static inline void audio_line_destroy_data(struct audio_line *line)
 {
-	for (size_t i = 0; i < MAX_AUDIO_PLANES; i++) {
+	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
 		circlebuf_free(&line->buffers[i]);
 		da_free(line->volume_buffers[i]);
 	}
@@ -74,7 +74,7 @@ struct audio_output {
 	pthread_t thread;
 	event_t stop_event;

-	DARRAY(uint8_t) mix_buffers[MAX_AUDIO_PLANES];
+	DARRAY(uint8_t) mix_buffers[MAX_AV_PLANES];

 	bool initialized;

@@ -196,7 +196,7 @@ static inline void do_audio_output(struct audio_output *audio,
 		uint64_t timestamp, uint32_t frames)
 {
 	struct audio_data data;
-	for (size_t i = 0; i < MAX_AUDIO_PLANES; i++)
+	for (size_t i = 0; i < MAX_AV_PLANES; i++)
 		data.data[i] = audio->mix_buffers[i].array;
 	data.frames = frames;
 	data.timestamp = timestamp;
@@ -415,7 +415,7 @@ void audio_output_close(audio_t audio)
 		line = next;
 	}

-	for (size_t i = 0; i < MAX_AUDIO_PLANES; i++)
+	for (size_t i = 0; i < MAX_AV_PLANES; i++)
 		da_free(audio->mix_buffers[i]);

 	event_destroy(&audio->stop_event);
@@ -17,6 +17,7 @@

 #pragma once

+#include "media-io-defs.h"
 #include "../util/c99defs.h"

 #ifdef __cplusplus
@@ -28,8 +29,6 @@ extern "C" {
  * for the media.
  */

-#define MAX_AUDIO_PLANES 8
-
 struct audio_output;
 struct audio_line;
 typedef struct audio_output *audio_t;
@@ -64,7 +63,7 @@ enum speaker_layout {
 };

 struct audio_data {
-	const uint8_t *data[MAX_AUDIO_PLANES];
+	const uint8_t *data[MAX_AV_PLANES];
 	uint32_t frames;
 	uint64_t timestamp;
 	float volume;
@@ -30,7 +30,7 @@ struct audio_resampler {
 	uint64_t input_layout;
 	enum AVSampleFormat input_format;

-	uint8_t *output_buffer[MAX_AUDIO_PLANES];
+	uint8_t *output_buffer[MAX_AV_PLANES];
 	uint64_t output_layout;
 	enum AVSampleFormat output_format;
 	int output_size;
@@ -19,74 +19,74 @@
 #include <xmmintrin.h>
 #include <emmintrin.h>

+/* ...surprisingly, if I don't use a macro to force inlining, it causes the
+ * CPU usage to boost by a tremendous amount in debug builds. */
+
 #define get_m128_32_0(val) (*((uint32_t*)&val))
 #define get_m128_32_1(val) (*(((uint32_t*)&val)+1))

-static FORCE_INLINE void pack_lum(uint8_t *lum_plane,
-		uint32_t lum_pos0, uint32_t lum_pos1,
-		const __m128i line1, const __m128i line2,
-		const __m128i lum_mask)
+#define pack_lum(lum_plane, lum_pos0, lum_pos1, line1, line2, lum_mask) \
+	do { \
+		__m128i pack_val = _mm_packs_epi32( \
+				_mm_srli_si128(_mm_and_si128(line1, lum_mask), 1), \
+				_mm_srli_si128(_mm_and_si128(line2, lum_mask), 1)); \
+		pack_val = _mm_packus_epi16(pack_val, pack_val); \
+		\
+		*(uint32_t*)(lum_plane+lum_pos0) = get_m128_32_0(pack_val); \
+		*(uint32_t*)(lum_plane+lum_pos1) = get_m128_32_1(pack_val); \
+	} while (false)
+
+#define pack_ch_1plane(uv_plane, chroma_pos, line1, line2, uv_mask) \
+	do { \
+		__m128i add_val = _mm_add_epi64( \
+				_mm_and_si128(line1, uv_mask), \
+				_mm_and_si128(line2, uv_mask)); \
+		__m128i avg_val = _mm_add_epi64( \
+				add_val, \
+				_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1))); \
+		avg_val = _mm_srai_epi16(avg_val, 2); \
+		avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
+		avg_val = _mm_packus_epi16(avg_val, avg_val); \
+		\
+		*(uint32_t*)(uv_plane+chroma_pos) = get_m128_32_0(avg_val); \
+	} while (false)
+
+#define pack_ch_2plane(u_plane, v_plane, chroma_pos, line1, line2, uv_mask) \
+	do { \
+		uint32_t packed_vals; \
+		\
+		__m128i add_val = _mm_add_epi64( \
+				_mm_and_si128(line1, uv_mask), \
+				_mm_and_si128(line2, uv_mask)); \
+		__m128i avg_val = _mm_add_epi64( \
+				add_val, \
+				_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1))); \
+		avg_val = _mm_srai_epi16(avg_val, 2); \
+		avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
+		avg_val = _mm_shufflelo_epi16(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
+		avg_val = _mm_packus_epi16(avg_val, avg_val); \
+		\
+		packed_vals = get_m128_32_0(avg_val); \
+		\
+		*(uint16_t*)(u_plane+chroma_pos) = (uint16_t)(packed_vals); \
+		*(uint16_t*)(v_plane+chroma_pos) = (uint16_t)(packed_vals>>16); \
+	} while (false)
+
+
+static FORCE_INLINE uint32_t min_uint32(uint32_t a, uint32_t b)
 {
-	__m128i pack_val = _mm_packs_epi32(
-			_mm_srli_si128(_mm_and_si128(line1, lum_mask), 1),
-			_mm_srli_si128(_mm_and_si128(line2, lum_mask), 1));
-	pack_val = _mm_packus_epi16(pack_val, pack_val);
-
-	*(uint32_t*)(lum_plane+lum_pos0) = get_m128_32_0(pack_val);
-	*(uint32_t*)(lum_plane+lum_pos1) = get_m128_32_1(pack_val);
-}
-
-static FORCE_INLINE void pack_chroma_1plane(uint8_t *uv_plane,
-		uint32_t chroma_pos,
-		const __m128i line1, const __m128i line2,
-		const __m128i uv_mask)
-{
-	__m128i add_val = _mm_add_epi64(
-			_mm_and_si128(line1, uv_mask),
-			_mm_and_si128(line2, uv_mask));
-	__m128i avg_val = _mm_add_epi64(
-			add_val,
-			_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1)));
-	avg_val = _mm_srai_epi16(avg_val, 2);
-	avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0));
-	avg_val = _mm_packus_epi16(avg_val, avg_val);
-
-	*(uint32_t*)(uv_plane+chroma_pos) = get_m128_32_0(avg_val);
-}
-
-static FORCE_INLINE void pack_chroma_2plane(uint8_t *u_plane, uint8_t *v_plane,
-		uint32_t chroma_pos,
-		const __m128i line1, const __m128i line2,
-		const __m128i uv_mask)
-{
-	uint32_t packed_vals;
-
-	__m128i add_val = _mm_add_epi64(
-			_mm_and_si128(line1, uv_mask),
-			_mm_and_si128(line2, uv_mask));
-	__m128i avg_val = _mm_add_epi64(
-			add_val,
-			_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1)));
-	avg_val = _mm_srai_epi16(avg_val, 2);
-	avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0));
-	avg_val = _mm_shufflelo_epi16(avg_val, _MM_SHUFFLE(3, 1, 2, 0));
-	avg_val = _mm_packus_epi16(avg_val, avg_val);
-
-	packed_vals = get_m128_32_0(avg_val);
-
-	*(uint16_t*)(u_plane+chroma_pos) = (uint16_t)(packed_vals);
-	*(uint16_t*)(v_plane+chroma_pos) = (uint16_t)(packed_vals>>16);
+	return a < b ? a : b;
 }

 void compress_uyvx_to_i420(
 		const uint8_t *input, uint32_t in_linesize,
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output[], const uint32_t out_linesize[])
 {
 	uint8_t *lum_plane = output[0];
 	uint8_t *u_plane = output[1];
 	uint8_t *v_plane = output[2];
+	uint32_t width = min_uint32(in_linesize, out_linesize[0]);
 	uint32_t y;

 	__m128i lum_mask = _mm_set1_epi32(0x0000FF00);
@@ -109,7 +109,7 @@ void compress_uyvx_to_i420(

 			pack_lum(lum_plane, lum_pos0, lum_pos1,
 					line1, line2, lum_mask);
-			pack_chroma_2plane(u_plane, v_plane,
+			pack_ch_2plane(u_plane, v_plane,
 					chroma_y_pos + (x>>1),
 					line1, line2, uv_mask);
 		}
@@ -118,12 +118,12 @@ void compress_uyvx_to_i420(

 void compress_uyvx_to_nv12(
 		const uint8_t *input, uint32_t in_linesize,
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output[], const uint32_t out_linesize[])
 {
 	uint8_t *lum_plane = output[0];
 	uint8_t *chroma_plane = output[1];
+	uint32_t width = min_uint32(in_linesize, out_linesize[0]);
 	uint32_t y;

 	__m128i lum_mask = _mm_set1_epi32(0x0000FF00);
@@ -146,7 +146,7 @@ void compress_uyvx_to_nv12(

 			pack_lum(lum_plane, lum_pos0, lum_pos1,
 					line1, line2, lum_mask);
-			pack_chroma_1plane(chroma_plane, chroma_y_pos + x,
+			pack_ch_1plane(chroma_plane, chroma_y_pos + x,
 					line1, line2, uv_mask);
 		}
 	}
@@ -154,12 +154,11 @@ void compress_uyvx_to_nv12(

 void decompress_420(
 		const uint8_t *const input[], const uint32_t in_linesize[],
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output, uint32_t out_linesize)
 {
 	uint32_t start_y_d2 = start_y/2;
-	uint32_t width_d2 = width/2;
+	uint32_t width_d2 = min_uint32(in_linesize[0], out_linesize)/2;
 	uint32_t height_d2 = end_y/2;
 	uint32_t y;

@@ -170,8 +169,8 @@ void decompress_420(
 		register uint32_t *output0, *output1;
 		uint32_t x;

-		lum0 = input[0] + y * 2*width;
-		lum1 = lum0 + width;
+		lum0 = input[0] + y * 2 * in_linesize[0];
+		lum1 = lum0 + in_linesize[0];
 		output0 = (uint32_t*)(output + y * 2 * in_linesize[0]);
 		output1 = (uint32_t*)((uint8_t*)output0 + in_linesize[0]);

@@ -190,12 +189,11 @@ void decompress_420(

 void decompress_nv12(
 		const uint8_t *const input[], const uint32_t in_linesize[],
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output, uint32_t out_linesize)
 {
 	uint32_t start_y_d2 = start_y/2;
-	uint32_t width_d2 = width/2;
+	uint32_t width_d2 = min_uint32(in_linesize[0], out_linesize)/2;
 	uint32_t height_d2 = end_y/2;
 	uint32_t y;

@@ -206,9 +204,9 @@ void decompress_nv12(
 		uint32_t x;

 		chroma = (const uint16_t*)(input[1] + y * in_linesize[1]);
-		lum0 = input[0] + y*2 * in_linesize[0];
+		lum0 = input[0] + y * 2 * in_linesize[0];
 		lum1 = lum0 + in_linesize[0];
-		output0 = (uint32_t*)(output + y*2 * out_linesize);
+		output0 = (uint32_t*)(output + y * 2 * out_linesize);
 		output1 = (uint32_t*)((uint8_t*)output0 + out_linesize);

 		for (x = 0; x < width_d2; x++) {
@@ -225,12 +223,11 @@ void decompress_nv12(

 void decompress_422(
 		const uint8_t *input, uint32_t in_linesize,
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output, uint32_t out_linesize,
 		bool leading_lum)
 {
-	uint32_t width_d2 = width >> 1;
+	uint32_t width_d2 = min_uint32(in_linesize, out_linesize)/2;
 	uint32_t y;

 	register const uint32_t *input32;
@@ -29,31 +29,26 @@ extern "C" {

 EXPORT void compress_uyvx_to_i420(
 		const uint8_t *input, uint32_t in_linesize,
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output[], const uint32_t out_linesize[]);

 EXPORT void compress_uyvx_to_nv12(
 		const uint8_t *input, uint32_t in_linesize,
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output[], const uint32_t out_linesize[]);

 EXPORT void decompress_nv12(
 		const uint8_t *const input[], const uint32_t in_linesize[],
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output, uint32_t out_linesize);

 EXPORT void decompress_420(
 		const uint8_t *const input[], const uint32_t in_linesize[],
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output, uint32_t out_linesize);

 EXPORT void decompress_422(
 		const uint8_t *input, uint32_t in_linesize,
-		uint32_t width, uint32_t height,
 		uint32_t start_y, uint32_t end_y,
 		uint8_t *output, uint32_t out_linesize,
 		bool leading_lum);
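The unused width/height parameters are gone from all five conversion routines; the effective row width is now derived inside each function from the smaller of the input and output linesizes (min_uint32 in format-conversion.c above). A hedged caller sketch against the new compress_uyvx_to_i420 signature (the buffers, linesizes and include path are illustrative, not from the commit):

#include "media-io/format-conversion.h"

static void convert_uyvx_frame(const uint8_t *uyvx, uint32_t uyvx_linesize,
		uint32_t height, uint8_t *i420_planes[3],
		const uint32_t i420_linesize[3])
{
	/* width/height are no longer passed; only the row range is given */
	compress_uyvx_to_i420(uyvx, uyvx_linesize, 0, height,
			i420_planes, i420_linesize);
}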
libobs/media-io/media-io-defs.h (new file, 20 lines)
@@ -0,0 +1,20 @@
+/******************************************************************************
+  Copyright (C) 2014 by Hugh Bailey <obs.jim@gmail.com>
+
+  This program is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+******************************************************************************/
+
+#pragma once
+
+#define MAX_AV_PLANES 8
@@ -63,9 +63,6 @@ static inline void video_swapframes(struct video_output *video)

 static inline void video_output_cur_frame(struct video_output *video)
 {
-	size_t width = video->info.width;
-	size_t height = video->info.height;
-
 	if (!video->cur_frame.data[0])
 		return;

@@ -17,6 +17,7 @@

 #pragma once

+#include "media-io-defs.h"
 #include "../util/c99defs.h"

 #ifdef __cplusplus
@@ -25,8 +26,6 @@ extern "C" {

 /* Base video output component. Use this to create an video output track. */

-#define MAX_VIDEO_PLANES 8
-
 struct video_output;
 typedef struct video_output *video_t;

@@ -49,8 +48,8 @@ enum video_format {
 };

 struct video_frame {
-	const uint8_t *data[MAX_VIDEO_PLANES];
-	uint32_t linesize[MAX_VIDEO_PLANES];
+	const uint8_t *data[MAX_AV_PLANES];
+	uint32_t linesize[MAX_AV_PLANES];
 	uint64_t timestamp;
 };

@@ -237,6 +237,8 @@ obs_data_t obs_data_create()
 obs_data_t obs_data_create_from_json(const char *json_string)
 {
 	/* TODO */
+	#pragma message ("TODO: implement obs_data_create_from_json")
+	UNUSED_PARAMETER(json_string);
 	return NULL;
 }

@@ -143,7 +143,7 @@ static inline void render_display_begin(struct obs_display *display)
 	gs_setviewport(0, 0, display->cx, display->cy);
 }

-static inline void render_display_end(struct obs_display *display)
+static inline void render_display_end()
 {
 	gs_endscene();
 	gs_present();
@@ -166,5 +166,5 @@ void render_display(struct obs_display *display)

 	pthread_mutex_unlock(&display->draw_callbacks_mutex);

-	render_display_end(display);
+	render_display_end();
 }
@@ -69,6 +69,8 @@ obs_encoder_t obs_encoder_create(const char *id, const char *name,
 	pthread_mutex_lock(&obs->data.encoders_mutex);
 	da_push_back(obs->data.encoders, &encoder);
 	pthread_mutex_unlock(&obs->data.encoders_mutex);
+
+	encoder->name = bstrdup(name);
 	return encoder;
 }

@@ -81,6 +83,7 @@ void obs_encoder_destroy(obs_encoder_t encoder)

 		encoder->info.destroy(encoder->data);
 		obs_data_release(encoder->settings);
+		bfree(encoder->name);
 		bfree(encoder);
 	}
 }
@@ -88,53 +91,73 @@ void obs_encoder_destroy(obs_encoder_t encoder)
 obs_properties_t obs_encoder_properties(const char *id, const char *locale)
 {
 	const struct obs_encoder_info *ei = get_encoder_info(id);
-	if (ei && ei->properties)
-		return ei->properties(locale);
+	if (ei && ei->get_properties)
+		return ei->get_properties(locale);
 	return NULL;
 }

 void obs_encoder_update(obs_encoder_t encoder, obs_data_t settings)
 {
+	if (!encoder) return;
+
 	obs_data_replace(&encoder->settings, settings);
 	encoder->info.update(encoder->data, encoder->settings);
 }

-bool obs_encoder_reset(obs_encoder_t encoder)
+bool obs_encoder_reset(obs_encoder_t encoder, obs_data_t settings)
 {
-	return encoder->info.reset(encoder->data);
+	if (!encoder) return false;
+
+	return encoder->info.reset(encoder->data, settings);
 }

-bool obs_encoder_encode(obs_encoder_t encoder, void *frames, size_t size)
+bool obs_encoder_encode(obs_encoder_t encoder,
+		const struct encoder_frame *frame,
+		struct encoder_packet *packet, bool *received_packet)
 {
-	/* TODO */
-	//encoder->info.encode(encoder->data, frames, size, packets);
-	return false;
+	if (!encoder) return false;
+
+	return encoder->info.encode(encoder->data, frame, packet,
+			received_packet);
 }

-int obs_encoder_getheader(obs_encoder_t encoder,
-		struct encoder_packet **packets)
+bool obs_encoder_get_extra_data(obs_encoder_t encoder, uint8_t **extra_data,
+		size_t *size)
 {
-	return encoder->info.getheader(encoder, packets);
-}
+	if (!encoder) return false;

-bool obs_encoder_setbitrate(obs_encoder_t encoder, uint32_t bitrate,
-		uint32_t buffersize)
-{
-	if (encoder->info.setbitrate)
-		return encoder->info.setbitrate(encoder->data, bitrate,
-				buffersize);
-	return false;
-}
+	if (encoder->info.get_extra_data)
+		return encoder->info.get_extra_data(encoder, extra_data, size);

-bool obs_encoder_request_keyframe(obs_encoder_t encoder)
-{
-	if (encoder->info.request_keyframe)
-		return encoder->info.request_keyframe(encoder->data);
 	return false;
 }

 obs_data_t obs_encoder_get_settings(obs_encoder_t encoder)
 {
+	if (!encoder) return NULL;
+
 	obs_data_addref(encoder->settings);
 	return encoder->settings;
 }
+
+bool obs_encoder_start(obs_encoder_t encoder,
+		void (*new_packet)(void *param, struct encoder_packet *packet),
+		void *param)
+{
+	#pragma message ("TODO: implement obs_encoder_start")
+	UNUSED_PARAMETER(encoder);
+	UNUSED_PARAMETER(new_packet);
+	UNUSED_PARAMETER(param);
+	return false;
+}
+
+void obs_encoder_stop(obs_encoder_t encoder,
+		void (*new_packet)(void *param, struct encoder_packet *packet),
+		void *param)
+{
+	#pragma message ("TODO: implement obs_encoder_stop")
+	UNUSED_PARAMETER(encoder);
+	UNUSED_PARAMETER(new_packet);
+	UNUSED_PARAMETER(param);
+	return;
+}
@@ -17,27 +17,153 @@

 #pragma once

-struct obs_encoder_info {
-	const char *id;
-
-	const char *(*getname)(const char *locale);
-
-	void *(*create)(obs_data_t settings, obs_encoder_t encoder);
-	void (*destroy)(void *data);
-
-	bool (*reset)(void *data);
-
-	int (*encode)(void *data, void *frames, size_t size,
-			struct encoder_packet **packets);
-	int (*getheader)(void *data, struct encoder_packet **packets);
-
-	/* optional */
-	void (*update)(void *data, obs_data_t settings);
-
-	obs_properties_t (*properties)(const char *locale);
-
-	bool (*setbitrate)(void *data, uint32_t bitrate, uint32_t buffersize);
-	bool (*request_keyframe)(void *data);
+/** Specifies the encoder type */
+enum obs_encoder_type {
+	OBS_PACKET_AUDIO,
+	OBS_PACKET_VIDEO
+};
+
+/** Encoder output packet */
+struct encoder_packet {
+	uint8_t *data; /**< Packet data */
+	size_t size;   /**< Packet size */
+
+	int64_t pts;   /**< Presentation timestamp */
+	int64_t dts;   /**< Decode timestamp */
+
+	enum obs_encoder_type type; /**< Encoder type */
+
+	/**
+	 * Packet priority
+	 *
+	 * This is generally use by video encoders to specify the priority
+	 * of the packet.  If this frame is dropped, it will have to wait for
+	 * another packet of drop_priority.
+	 */
+	int priority;
+
+	/**
+	 * Dropped packet priority
+	 *
+	 * If this packet is dropped, the next packet must be of this priority
+	 * or higher to continue transmission.
+	 */
+	int drop_priority;
+};
+
+/** Encoder input frame */
+struct encoder_frame {
+	/** Data for the frame/audio */
+	uint8_t *data[MAX_AV_PLANES];
+
+	/** size of each plane */
+	uint32_t linesize[MAX_AV_PLANES];
+
+	/** Number of frames (audio only) */
+	uint32_t frames;
+
+	/** Presentation timestamp */
+	int64_t pts;
+};
+
+/**
+ * Encoder interface
+ *
+ * Encoders have a limited usage with OBS.  You are not generally supposed to
+ * implement every encoder out there.  Generally, these are limited or specific
+ * encoders for h264/aac for streaming and recording.  It doesn't have to be
+ * *just* h264 or aac of course, but generally those are the expected encoders.
+ *
+ * That being said, other encoders will be kept in mind for future use.
+ */
+struct obs_encoder_info {
+	/* ----------------------------------------------------------------- */
+	/* Required implementation*/
+
+	/** Specifies the named identifier of this encoder */
+	const char *id;
+
+	/**
+	 * Gets the full translated name of this encoder
+	 *
+	 * @param locale Locale to use for translation
+	 * @return Translated name of the encoder
+	 */
+	const char *(*getname)(const char *locale);
+
+	/**
+	 * Creates the encoder with the specified settings
+	 *
+	 * @param settings Settings for the encoder
+	 * @param encoder OBS encoder context
+	 * @return Data associated with this encoder context
+	 */
+	void *(*create)(obs_data_t settings, obs_encoder_t encoder);
+
+	/**
+	 * Destroys the encoder data
+	 *
+	 * @param data Data associated with this encoder context
+	 */
+	void (*destroy)(void *data);
+
+	/**
+	 * Resets the encoder with the specified settings
+	 *
+	 * @param data Data associated with this encoder context
+	 * @param settings New settings for the encoder
+	 * @return true if successful, false otherwise
+	 */
+	bool (*reset)(void *data, obs_data_t settings);
+
+	/**
+	 * Encodes frame(s), and outputs encoded packets as they become
+	 * available.
+	 *
+	 * @param data Data associated with this encoder
+	 *        context
+	 * @param[in] frame Raw audio/video data to encode
+	 * @param[out] packet Encoder packet output, if any
+	 * @param[out] received_packet Set to true if a packet was received,
+	 *             false otherwise
+	 * @return true if successful, false otherwise.
+	 */
+	int (*encode)(void *data, const struct encoder_frame *frame,
+			struct encoder_packet *packet, bool *received_packet);
+
+	/* ----------------------------------------------------------------- */
+	/* Optional implementation */
+
+	/**
+	 * Gets the property information of this encoder
+	 *
+	 * @param locale The locale to translate with
+	 * @return The properties data
+	 */
+	obs_properties_t (*get_properties)(const char *locale);
+
+	/**
+	 * Updates the settings for this encoder
+	 *
+	 * @param data Data associated with this encoder context
+	 * @param settings New settings for this encoder
+	 */
+	void (*update)(void *data, obs_data_t settings);
+
+	/**
+	 * Returns extra data associated with this encoder (usually header)
+	 *
+	 * @param data Data associated with this encoder context
+	 * @param extra_data Pointer to receive the extra data
+	 * @param size Pointer to receive the size of the extra data
+	 */
+	bool (*get_extra_data)(void *data, uint8_t **extra_data, size_t *size);
 };

+/**
+ * Register an encoder definition to the current obs context.  This should be
+ * used in obs_module_load.
+ *
+ * @param info Pointer to the source definition structure.
+ */
 EXPORT void obs_register_encoder(const struct obs_encoder_info *info);
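The rewritten obs_encoder_info above documents the full callback contract. As a rough illustration of how a module would plug into it, here is a hedged, do-nothing skeleton; the "null_encoder" id and every internal name are invented for this sketch, only the struct fields, UNUSED_PARAMETER and obs_register_encoder come from the commit, and registration would normally happen from a module's load function with the libobs headers in scope:

static const char *null_getname(const char *locale)
{
	UNUSED_PARAMETER(locale);
	return "Null encoder";
}

static void *null_create(obs_data_t settings, obs_encoder_t encoder)
{
	static int context; /* placeholder; a real encoder would allocate state */
	UNUSED_PARAMETER(settings);
	UNUSED_PARAMETER(encoder);
	return &context;
}

static void null_destroy(void *data)
{
	UNUSED_PARAMETER(data); /* nothing was allocated in this sketch */
}

static bool null_reset(void *data, obs_data_t settings)
{
	UNUSED_PARAMETER(data);
	UNUSED_PARAMETER(settings);
	return true;
}

static int null_encode(void *data, const struct encoder_frame *frame,
		struct encoder_packet *packet, bool *received_packet)
{
	UNUSED_PARAMETER(data);
	UNUSED_PARAMETER(frame);
	UNUSED_PARAMETER(packet);
	*received_packet = false; /* no output produced */
	return true; /* "success" per the doc comment, though the member is declared int */
}

static struct obs_encoder_info null_encoder_info = {
	.id      = "null_encoder",
	.getname = null_getname,
	.create  = null_create,
	.destroy = null_destroy,
	.reset   = null_reset,
	.encode  = null_encode,
};

void register_null_encoder(void)
{
	obs_register_encoder(&null_encoder_info);
}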
@@ -202,7 +202,6 @@ void obs_register_encoder(const struct obs_encoder_info *info)
 	CHECK_REQUIRED_VAL(info, destroy, obs_register_encoder);
 	CHECK_REQUIRED_VAL(info, reset, obs_register_encoder);
 	CHECK_REQUIRED_VAL(info, encode, obs_register_encoder);
-	CHECK_REQUIRED_VAL(info, getheader, obs_register_encoder);

 	REGISTER_OBS_DEF(cur_encoder_info_size, obs_encoder_info,
 			obs->encoder_types, info);
@@ -210,12 +209,9 @@ void obs_register_encoder(const struct obs_encoder_info *info)

 void obs_register_service(const struct obs_service_info *info)
 {
-	CHECK_REQUIRED_VAL(info, getname, obs_register_service);
-	CHECK_REQUIRED_VAL(info, create, obs_register_service);
-	CHECK_REQUIRED_VAL(info, destroy, obs_register_service);
-
-	REGISTER_OBS_DEF(cur_service_info_size, obs_service_info,
-			obs->service_types, info);
+	/* TODO */
+	#pragma message ("TODO: implement obs_register_service")
+	UNUSED_PARAMETER(info);
 }

 void obs_regsiter_modal_ui(const struct obs_modal_ui *info)
@@ -31,8 +31,8 @@ static inline void signal_item_remove(struct obs_scene_item *item)

 static const char *scene_getname(const char *locale)
 {
-	/* TODO: locale lookup of display name */
-	return "Scene";
+	UNUSED_PARAMETER(locale);
+	return "Scene internal source type";
 }

 static void *scene_create(obs_data_t settings, struct obs_source *source)
@@ -51,6 +51,7 @@ static void *scene_create(obs_data_t settings, struct obs_source *source)
 		goto fail;
 	}

+	UNUSED_PARAMETER(settings);
 	return scene;

 fail:
@@ -142,11 +143,20 @@ static void scene_video_render(void *data, effect_t effect)
 	}

 	pthread_mutex_unlock(&scene->mutex);
+
+	UNUSED_PARAMETER(effect);
 }

-static uint32_t scene_getsize(void *data)
+static uint32_t scene_getwidth(void *data)
 {
-	return 0;
+	UNUSED_PARAMETER(data);
+	return obs->video.base_width;
 }

+static uint32_t scene_getheight(void *data)
+{
+	UNUSED_PARAMETER(data);
+	return obs->video.base_height;
+}
+
 static const struct obs_source_info scene_info =
@@ -158,8 +168,8 @@ static const struct obs_source_info scene_info =
 	.create = scene_create,
 	.destroy = scene_destroy,
 	.video_render = scene_video_render,
-	.getwidth = scene_getsize,
-	.getheight = scene_getsize,
+	.getwidth = scene_getwidth,
+	.getheight = scene_getheight,
 };

 obs_scene_t obs_scene_create(const char *name)
@@ -22,7 +22,7 @@ struct obs_service_info {
 	char *id;

 	const char *(*getname)(const char *locale);

+#if 0
 	void *(*create)(obs_data_t settings, struct service_data *service);
 	void (*destroy)(void *data);

@@ -32,4 +32,5 @@ struct obs_service_info {
 	/* get stream url/key */
 	/* get (viewers/etc) */
 	/* send (current game/title/activate commercial/etc) */
+#endif
 };
|
||||
enum video_format format, uint32_t width, uint32_t height)
|
||||
{
|
||||
size_t size;
|
||||
size_t offsets[MAX_VIDEO_PLANES];
|
||||
size_t offsets[MAX_AV_PLANES];
|
||||
int alignment = base_get_alignment();
|
||||
|
||||
memset(offsets, 0, sizeof(offsets));
|
||||
@@ -255,7 +255,7 @@ static void obs_source_destroy(obs_source_t source)
|
||||
if (source->data)
|
||||
source->info.destroy(source->data);
|
||||
|
||||
for (i = 0; i < MAX_AUDIO_PLANES; i++)
|
||||
for (i = 0; i < MAX_AV_PLANES; i++)
|
||||
bfree(source->audio_data.data[i]);
|
||||
|
||||
audio_line_destroy(source->audio_line);
|
||||
@@ -366,7 +366,7 @@ void obs_source_video_tick(obs_source_t source, float seconds)
|
||||
}
|
||||
|
||||
/* unless the value is 3+ hours worth of frames, this won't overflow */
|
||||
static inline uint64_t conv_frames_to_time(obs_source_t source, size_t frames)
|
||||
static inline uint64_t conv_frames_to_time(size_t frames)
|
||||
{
|
||||
const struct audio_output_info *info;
|
||||
info = audio_output_getinfo(obs->audio.audio);
|
||||
@@ -423,7 +423,7 @@ static void source_output_audio_line(obs_source_t source,
|
||||
}
|
||||
|
||||
source->next_audio_ts_min = in.timestamp +
|
||||
conv_frames_to_time(source, in.frames);
|
||||
conv_frames_to_time(in.frames);
|
||||
|
||||
if (source->audio_reset_ref != 0)
|
||||
return;
|
||||
@@ -499,24 +499,22 @@ static bool upload_frame(texture_t tex, const struct source_frame *frame)
|
||||
return false;
|
||||
|
||||
if (type == CONVERT_420)
|
||||
decompress_420(frame->data, frame->linesize,
|
||||
frame->width, frame->height, 0, frame->height,
|
||||
ptr, linesize);
|
||||
decompress_420((const uint8_t* const*)frame->data,
|
||||
frame->linesize,
|
||||
0, frame->height, ptr, linesize);
|
||||
|
||||
else if (type == CONVERT_NV12)
|
||||
decompress_nv12(frame->data, frame->linesize,
|
||||
frame->width, frame->height, 0, frame->height,
|
||||
ptr, linesize);
|
||||
decompress_nv12((const uint8_t* const*)frame->data,
|
||||
frame->linesize,
|
||||
0, frame->height, ptr, linesize);
|
||||
|
||||
else if (type == CONVERT_422_Y)
|
||||
decompress_422(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, 0, frame->height,
|
||||
ptr, linesize, true);
|
||||
0, frame->height, ptr, linesize, true);
|
||||
|
||||
else if (type == CONVERT_422_U)
|
||||
decompress_422(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, 0, frame->height,
|
||||
ptr, linesize, false);
|
||||
0, frame->height, ptr, linesize, false);
|
||||
|
||||
texture_unmap(tex);
|
||||
return true;
|
||||
@@ -799,8 +797,7 @@ static void copy_frame_data(struct source_frame *dst,
 	}
 }

-static inline struct source_frame *cache_video(obs_source_t source,
-		const struct source_frame *frame)
+static inline struct source_frame *cache_video(const struct source_frame *frame)
 {
 	/* TODO: use an actual cache */
 	struct source_frame *new_frame = source_frame_create(frame->format,
@@ -813,7 +810,7 @@ static inline struct source_frame *cache_video(obs_source_t source,
 void obs_source_output_video(obs_source_t source,
 		const struct source_frame *frame)
 {
-	struct source_frame *output = cache_video(source, frame);
+	struct source_frame *output = cache_video(frame);

 	pthread_mutex_lock(&source->filter_mutex);
 	output = filter_async_video(source, output);
@@ -875,15 +872,15 @@ static inline void reset_resampler(obs_source_t source,
 }

 static inline void copy_audio_data(obs_source_t source,
-		const void *const data[], uint32_t frames, uint64_t timestamp)
+		const uint8_t *const data[], uint32_t frames, uint64_t ts)
 {
 	size_t planes = audio_output_planes(obs->audio.audio);
 	size_t blocksize = audio_output_blocksize(obs->audio.audio);
 	size_t size = (size_t)frames * blocksize;
 	bool resize = source->audio_storage_size < size;

-	source->audio_data.frames = frames;
-	source->audio_data.timestamp = timestamp;
+	source->audio_data.frames = frames;
+	source->audio_data.timestamp = ts;

 	for (size_t i = 0; i < planes; i++) {
 		/* ensure audio storage capacity */
@@ -911,7 +908,7 @@ static void process_audio(obs_source_t source, const struct source_audio *audio)
 		return;

 	if (source->resampler) {
-		uint8_t *output[MAX_AUDIO_PLANES];
+		uint8_t *output[MAX_AV_PLANES];
 		uint32_t frames;
 		uint64_t offset;

@@ -921,7 +918,7 @@ static void process_audio(obs_source_t source, const struct source_audio *audio)
 				output, &frames, &offset,
 				audio->data, audio->frames);

-		copy_audio_data(source, output, frames,
+		copy_audio_data(source, (const uint8_t *const *)output, frames,
 				audio->timestamp - offset);
 	} else {
 		copy_audio_data(source, audio->data, audio->frames,
@@ -933,7 +930,6 @@ void obs_source_output_audio(obs_source_t source,
 		const struct source_audio *audio)
 {
 	uint32_t flags = obs_source_get_output_flags(source);
-	size_t blocksize = audio_output_blocksize(obs->audio.audio);
 	struct filtered_audio *output;

 	process_audio(source, audio);
@@ -951,7 +947,7 @@ void obs_source_output_audio(obs_source_t source,
 	if (source->timing_set || async) {
 		struct audio_data data;

-		for (int i = 0; i < MAX_AUDIO_PLANES; i++)
+		for (int i = 0; i < MAX_AV_PLANES; i++)
 			data.data[i] = output->data[i];

 		data.frames = output->frames;
@@ -1022,7 +1018,6 @@ static inline struct source_frame *get_closest_frame(obs_source_t source,
 struct source_frame *obs_source_getframe(obs_source_t source)
 {
 	struct source_frame *frame = NULL;
-	uint64_t last_frame_time = source->last_frame_ts;
 	int audio_time_refs = 0;
 	uint64_t sys_time;

@@ -1087,11 +1082,10 @@ void obs_source_gettype(obs_source_t source, enum obs_source_type *type,
 }

 static inline void render_filter_bypass(obs_source_t target, effect_t effect,
-		uint32_t width, uint32_t height, bool use_matrix)
+		bool use_matrix)
 {
 	const char *tech_name = use_matrix ? "DrawMatrix" : "Draw";
 	technique_t tech = effect_gettechnique(effect, tech_name);
 	eparam_t image = effect_getparambyname(effect, "image");
 	size_t passes, i;

 	passes = technique_begin(tech);
@@ -1141,7 +1135,7 @@ void obs_source_process_filter(obs_source_t filter, effect_t effect,
 	 * using the filter effect instead of rendering to texture to reduce
 	 * the total number of passes */
 	if (can_directly && expects_def && target == parent) {
-		render_filter_bypass(target, effect, width, height, use_matrix);
+		render_filter_bypass(target, effect, use_matrix);
 		return;
 	}

@@ -136,8 +136,7 @@ struct obs_source_info {
 	 * Gets the property information of this source
 	 *
 	 * @param locale The locale to translate with
-	 * @return The properties data.  Caller is responsible for
-	 *         freeing the data with obs_properties_destroy
+	 * @return The properties data
 	 */
 	obs_properties_t (*get_properties)(const char *locale);

@@ -79,7 +79,7 @@ static inline void unmap_last_surface(struct obs_core_video *video)
 }

 static inline void render_main_texture(struct obs_core_video *video,
-		int cur_texture, int prev_texture)
+		int cur_texture)
 {
 	struct vec4 clear_color;
 	vec4_set(&clear_color, 0.3f, 0.0f, 0.0f, 1.0f);
@@ -161,7 +161,7 @@ static inline void render_video(struct obs_core_video *video, int cur_texture,
 	gs_enable_depthtest(false);
 	gs_setcullmode(GS_NEITHER);

-	render_main_texture(video, cur_texture, prev_texture);
+	render_main_texture(video, cur_texture);
 	render_output_texture(video, cur_texture, prev_texture);
 	stage_output_texture(video, cur_texture, prev_texture);

@@ -171,7 +171,7 @@ static inline void render_video(struct obs_core_video *video, int cur_texture,
 }

 /* TODO: replace with more optimal conversion */
-static inline bool download_frame(struct obs_core_video *video, int cur_texture,
+static inline bool download_frame(struct obs_core_video *video,
 		int prev_texture, struct video_frame *frame)
 {
 	stagesurf_t surface = video->copy_surfaces[prev_texture];
@@ -195,14 +195,12 @@ static bool convert_frame(struct obs_core_video *video,
 	if (info->format == VIDEO_FORMAT_I420) {
 		compress_uyvx_to_i420(
 				frame->data[0], frame->linesize[0],
-				info->width, info->height,
 				0, info->height,
 				new_frame->data, new_frame->linesize);

 	} else if (info->format == VIDEO_FORMAT_NV12) {
 		compress_uyvx_to_nv12(
 				frame->data[0], frame->linesize[0],
-				info->width, info->height,
 				0, info->height,
 				new_frame->data, new_frame->linesize);

@@ -211,7 +209,7 @@ static bool convert_frame(struct obs_core_video *video,
 		return false;
 	}

-	for (size_t i = 0; i < MAX_VIDEO_PLANES; i++) {
+	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
 		frame->data[i] = new_frame->data[i];
 		frame->linesize[i] = new_frame->linesize[i];
 	}
@@ -246,7 +244,7 @@ static inline void output_frame(uint64_t timestamp)
 	gs_entercontext(obs_graphics());

 	render_video(video, cur_texture, prev_texture);
-	frame_ready = download_frame(video, cur_texture, prev_texture, &frame);
+	frame_ready = download_frame(video, prev_texture, &frame);

 	gs_leavecontext();

@@ -272,5 +270,6 @@ void *obs_video_thread(void *param)

 	}

+	UNUSED_PARAMETER(param);
 	return NULL;
 }
@@ -187,7 +187,6 @@ static void obs_free_graphics(void)
 	size_t i;

 	if (video->graphics) {
-		int cur_texture = video->cur_texture;
 		gs_entercontext(video->graphics);

 		if (video->mapped_surface)
@@ -246,7 +245,6 @@ static bool obs_init_data(void)
 {
 	struct obs_core_data *data = &obs->data;
 	pthread_mutexattr_t attr;
-	bool success = false;

 	pthread_mutex_init_value(&obs->data.displays_mutex);

libobs/obs.h (58 lines changed)
@@ -125,7 +125,7 @@ struct obs_video_info {
  * audio data
  */
 struct filtered_audio {
-	uint8_t *data[MAX_AUDIO_PLANES];
+	uint8_t *data[MAX_AV_PLANES];
 	uint32_t frames;
 	uint64_t timestamp;
 };
@@ -135,7 +135,7 @@ struct filtered_audio {
 * source audio.  Audio is automatically resampled and remixed as necessary.
 */
 struct source_audio {
-	const uint8_t *data[MAX_AUDIO_PLANES];
+	const uint8_t *data[MAX_AV_PLANES];
 	uint32_t frames;

 	enum speaker_layout speakers;
@@ -155,8 +155,8 @@ struct source_audio {
 * converted to RGB via shader on the graphics processor.
 */
 struct source_frame {
-	uint8_t *data[MAX_VIDEO_PLANES];
-	uint32_t linesize[MAX_VIDEO_PLANES];
+	uint8_t *data[MAX_AV_PLANES];
+	uint32_t linesize[MAX_AV_PLANES];
 	uint32_t width;
 	uint32_t height;
 	uint64_t timestamp;
@@ -166,22 +166,6 @@ struct source_frame {
 	bool flip;
 };

-enum packet_priority {
-	PACKET_PRIORITY_DISPOSABLE,
-	PACKET_PRIORITY_LOW,
-	PACKET_PRIORITY_PFRAME,
-	PACKET_PRIORITY_IFRAME,
-	PACKET_PRIORITY_OTHER /* audio usually */
-};
-
-struct encoder_packet {
-	int64_t dts;
-	int64_t pts;
-	void *data;
-	size_t size;
-	enum packet_priority priority;
-};
-
 /* ------------------------------------------------------------------------- */
 /* OBS context */

@@ -634,30 +618,28 @@ EXPORT obs_encoder_t obs_encoder_create(const char *id, const char *name,
 		obs_data_t settings);
 EXPORT void obs_encoder_destroy(obs_encoder_t encoder);

+EXPORT bool obs_encoder_reset(obs_encoder_t encoder, obs_data_t settings);
+
+EXPORT bool obs_encoder_encode(obs_encoder_t encoder,
+		const struct encoder_frame *frame,
+		struct encoder_packet *packet,
+		bool *received_packet);
+
+EXPORT bool obs_encoder_start(obs_encoder_t encoder,
+		void (*new_packet)(void *param, struct encoder_packet *packet),
+		void *param);
+EXPORT void obs_encoder_stop(obs_encoder_t encoder,
+		void (*new_packet)(void *param, struct encoder_packet *packet),
+		void *param);

 /** Returns the property list, if any.  Free with obs_properties_destroy */
 EXPORT obs_properties_t obs_output_properties(const char *id,
 		const char *locale);

 EXPORT void obs_encoder_update(obs_encoder_t encoder, obs_data_t settings);

-EXPORT bool obs_encoder_reset(obs_encoder_t encoder);
-
-EXPORT bool obs_encoder_encode(obs_encoder_t encoder, void *frames,
-		size_t size);
-EXPORT int obs_encoder_getheader(obs_encoder_t encoder,
-		struct encoder_packet **packets);
-
-EXPORT bool obs_encoder_start(obs_encoder_t encoder,
-		void (*new_packet)(void *param, struct encoder_packet *packet),
-		void *param);
-EXPORT bool obs_encoder_stop(obs_encoder_t encoder,
-		void (*new_packet)(void *param, struct encoder_packet *packet),
-		void *param);
-
-EXPORT bool obs_encoder_setbitrate(obs_encoder_t encoder, uint32_t bitrate,
-		uint32_t buffersize);
-
-EXPORT bool obs_encoder_request_keyframe(obs_encoder_t encoder);
+EXPORT bool obs_encoder_get_extra_data(obs_encoder_t encoder,
+		uint8_t **extra_data, size_t *size);

+EXPORT obs_data_t obs_encoder_get_settings(obs_encoder_t encoder);

@@ -21,6 +21,8 @@
  * bool, inline, stdint
  */

+#define UNUSED_PARAMETER(param) (void)param
+
 #ifdef _MSC_VER
 #define FORCE_INLINE __forceinline
 #else