Remove majority of warnings
There were a *lot* of warnings, managed to remove most of them. Also, put warning flags before C_FLAGS and CXX_FLAGS, rather than after, as -Wall -Wextra was overwriting flags that came before it.
This commit is contained in:
parent
4bc282f5e9
commit
966b943d5b
@ -31,8 +31,8 @@ if(${CMAKE_C_COMPILER_ID} MATCHES "Clang" OR ${CMAKE_CXX_COMPILER_ID} MATCHES "C
|
||||
endif()
|
||||
|
||||
if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_CLANG)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-function -Wno-unused-parameter -Wall -Wextra")
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99 -Wno-unused-function -Wno-unused-parameter -Wall -Wextra")
|
||||
set(CMAKE_CXX_FLAGS "-Wall -Wextra -Wno-unused-function ${CMAKE_CXX_FLAGS}")
|
||||
set(CMAKE_C_FLAGS "-Wall -Wextra -Wno-unused-function -Wno-missing-field-initializers ${CMAKE_C_FLAGS} -std=gnu99")
|
||||
|
||||
option(USE_LIBC++ "Use libc++ instead of libstdc++" ${APPLE})
|
||||
if(USE_LIBC++)
|
||||
|
@ -58,8 +58,7 @@ EXPORT vertbuffer_t device_create_vertexbuffer(device_t device,
|
||||
EXPORT indexbuffer_t device_create_indexbuffer(device_t device,
|
||||
enum gs_index_type type, void *indices, size_t num,
|
||||
uint32_t flags);
|
||||
EXPORT enum gs_texture_type device_gettexturetype(device_t device,
|
||||
texture_t texture);
|
||||
EXPORT enum gs_texture_type device_gettexturetype(texture_t texture);
|
||||
EXPORT void device_load_vertexbuffer(device_t device, vertbuffer_t vertbuffer);
|
||||
EXPORT void device_load_indexbuffer(device_t device, indexbuffer_t indexbuffer);
|
||||
EXPORT void device_load_texture(device_t device, texture_t tex, int unit);
|
||||
@ -150,7 +149,7 @@ EXPORT void stagesurface_destroy(stagesurf_t stagesurf);
|
||||
EXPORT uint32_t stagesurface_getwidth(stagesurf_t stagesurf);
|
||||
EXPORT uint32_t stagesurface_getheight(stagesurf_t stagesurf);
|
||||
EXPORT enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf);
|
||||
EXPORT bool stagesurface_map(stagesurf_t stagesurf, const void **data,
|
||||
EXPORT bool stagesurface_map(stagesurf_t stagesurf, const uint8_t **data,
|
||||
uint32_t *linesize);
|
||||
EXPORT void stagesurface_unmap(stagesurf_t stagesurf);
|
||||
|
||||
|
@ -688,7 +688,7 @@ indexbuffer_t device_create_indexbuffer(device_t device,
|
||||
return buffer;
|
||||
}
|
||||
|
||||
enum gs_texture_type device_gettexturetype(device_t device, texture_t texture)
|
||||
enum gs_texture_type device_gettexturetype(texture_t texture)
|
||||
{
|
||||
return texture->type;
|
||||
}
|
||||
@ -1547,7 +1547,7 @@ enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf)
|
||||
return stagesurf->format;
|
||||
}
|
||||
|
||||
bool stagesurface_map(stagesurf_t stagesurf, const void **data,
|
||||
bool stagesurface_map(stagesurf_t stagesurf, const uint8_t **data,
|
||||
uint32_t *linesize)
|
||||
{
|
||||
D3D11_MAPPED_SUBRESOURCE map;
|
||||
@ -1555,7 +1555,7 @@ bool stagesurface_map(stagesurf_t stagesurf, const void **data,
|
||||
D3D11_MAP_READ, 0, &map)))
|
||||
return false;
|
||||
|
||||
*data = map.pData;
|
||||
*data = (uint8_t*)map.pData;
|
||||
*linesize = map.RowPitch;
|
||||
return true;
|
||||
}
|
||||
|
@ -6,7 +6,7 @@
|
||||
#if defined(__APPLE__)
|
||||
#include <dlfcn.h>
|
||||
|
||||
static void* AppleGLGetProcAddress (const const char *name)
|
||||
static void* AppleGLGetProcAddress (const char *name)
|
||||
{
|
||||
static void* image = NULL;
|
||||
|
||||
|
@ -56,8 +56,7 @@ EXPORT vertbuffer_t device_create_vertexbuffer(device_t device,
|
||||
EXPORT indexbuffer_t device_create_indexbuffer(device_t device,
|
||||
enum gs_index_type type, void *indices, size_t num,
|
||||
uint32_t flags);
|
||||
EXPORT enum gs_texture_type device_gettexturetype(device_t device,
|
||||
texture_t texture);
|
||||
EXPORT enum gs_texture_type device_gettexturetype(texture_t texture);
|
||||
EXPORT void device_load_vertexbuffer(device_t device, vertbuffer_t vertbuffer);
|
||||
EXPORT void device_load_indexbuffer(device_t device, indexbuffer_t indexbuffer);
|
||||
EXPORT void device_load_texture(device_t device, texture_t tex, int unit);
|
||||
@ -145,7 +144,7 @@ EXPORT void stagesurface_destroy(stagesurf_t stagesurf);
|
||||
EXPORT uint32_t stagesurface_getwidth(stagesurf_t stagesurf);
|
||||
EXPORT uint32_t stagesurface_getheight(stagesurf_t stagesurf);
|
||||
EXPORT enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf);
|
||||
EXPORT bool stagesurface_map(stagesurf_t stagesurf, const void **data,
|
||||
EXPORT bool stagesurface_map(stagesurf_t stagesurf, const uint8_t **data,
|
||||
uint32_t *linesize);
|
||||
EXPORT void stagesurface_unmap(stagesurf_t stagesurf);
|
||||
|
||||
|
@ -70,6 +70,7 @@ stagesurf_t device_create_stagesurface(device_t device, uint32_t width,
|
||||
{
|
||||
struct gs_stage_surface *surf;
|
||||
surf = bzalloc(sizeof(struct gs_stage_surface));
|
||||
surf->device = device;
|
||||
surf->format = color_format;
|
||||
surf->width = width;
|
||||
surf->height = height;
|
||||
@ -176,7 +177,7 @@ enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf)
|
||||
return stagesurf->format;
|
||||
}
|
||||
|
||||
bool stagesurface_map(stagesurf_t stagesurf, const void **data,
|
||||
bool stagesurface_map(stagesurf_t stagesurf, const uint8_t **data,
|
||||
uint32_t *linesize)
|
||||
{
|
||||
if (!gl_bind_buffer(GL_PIXEL_PACK_BUFFER, stagesurf->pack_buffer))
|
||||
|
@ -85,10 +85,6 @@ static void gl_enable_debug()
|
||||
static void gl_enable_debug() {}
|
||||
#endif
|
||||
|
||||
static inline void required_extension_error(const char *extension)
|
||||
{
|
||||
}
|
||||
|
||||
static bool gl_init_extensions(struct gs_device* device)
|
||||
{
|
||||
if (!ogl_IsVersionGEQ(2, 1)) {
|
||||
@ -263,6 +259,14 @@ texture_t device_create_volumetexture(device_t device, uint32_t width,
|
||||
const void **data, uint32_t flags)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(device);
|
||||
UNUSED_PARAMETER(width);
|
||||
UNUSED_PARAMETER(height);
|
||||
UNUSED_PARAMETER(depth);
|
||||
UNUSED_PARAMETER(color_format);
|
||||
UNUSED_PARAMETER(levels);
|
||||
UNUSED_PARAMETER(data);
|
||||
UNUSED_PARAMETER(flags);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -279,8 +283,7 @@ samplerstate_t device_create_samplerstate(device_t device,
|
||||
return sampler;
|
||||
}
|
||||
|
||||
enum gs_texture_type device_gettexturetype(device_t device,
|
||||
texture_t texture)
|
||||
enum gs_texture_type device_gettexturetype(texture_t texture)
|
||||
{
|
||||
return texture->type;
|
||||
}
|
||||
@ -529,6 +532,9 @@ fail:
|
||||
void device_load_defaultsamplerstate(device_t device, bool b_3d, int unit)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(device);
|
||||
UNUSED_PARAMETER(b_3d);
|
||||
UNUSED_PARAMETER(unit);
|
||||
}
|
||||
|
||||
shader_t device_getvertexshader(device_t device)
|
||||
@ -902,6 +908,7 @@ fail:
|
||||
void device_endscene(device_t device)
|
||||
{
|
||||
/* does nothing */
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_clear(device_t device, uint32_t clear_flags,
|
||||
@ -927,6 +934,8 @@ void device_clear(device_t device, uint32_t clear_flags,
|
||||
glClear(gl_flags);
|
||||
if (!gl_success("glClear"))
|
||||
blog(LOG_ERROR, "device_clear (GL) failed");
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_setcullmode(device_t device, enum gs_cull_mode mode)
|
||||
@ -958,6 +967,8 @@ void device_enable_blending(device_t device, bool enable)
|
||||
gl_enable(GL_BLEND);
|
||||
else
|
||||
gl_disable(GL_BLEND);
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_enable_depthtest(device_t device, bool enable)
|
||||
@ -966,6 +977,8 @@ void device_enable_depthtest(device_t device, bool enable)
|
||||
gl_enable(GL_DEPTH_TEST);
|
||||
else
|
||||
gl_disable(GL_DEPTH_TEST);
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_enable_stenciltest(device_t device, bool enable)
|
||||
@ -974,6 +987,8 @@ void device_enable_stenciltest(device_t device, bool enable)
|
||||
gl_enable(GL_STENCIL_TEST);
|
||||
else
|
||||
gl_disable(GL_STENCIL_TEST);
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_enable_stencilwrite(device_t device, bool enable)
|
||||
@ -982,12 +997,16 @@ void device_enable_stencilwrite(device_t device, bool enable)
|
||||
glStencilMask(0xFFFFFFFF);
|
||||
else
|
||||
glStencilMask(0);
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_enable_color(device_t device, bool red, bool green,
|
||||
bool blue, bool alpha)
|
||||
{
|
||||
glColorMask(red, green, blue, alpha);
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_blendfunction(device_t device, enum gs_blend_type src,
|
||||
@ -999,6 +1018,8 @@ void device_blendfunction(device_t device, enum gs_blend_type src,
|
||||
glBlendFunc(gl_src, gl_dst);
|
||||
if (!gl_success("glBlendFunc"))
|
||||
blog(LOG_ERROR, "device_blendfunction (GL) failed");
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_depthfunction(device_t device, enum gs_depth_test test)
|
||||
@ -1008,6 +1029,8 @@ void device_depthfunction(device_t device, enum gs_depth_test test)
|
||||
glDepthFunc(gl_test);
|
||||
if (!gl_success("glDepthFunc"))
|
||||
blog(LOG_ERROR, "device_depthfunction (GL) failed");
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_stencilfunction(device_t device, enum gs_stencil_side side,
|
||||
@ -1019,6 +1042,8 @@ void device_stencilfunction(device_t device, enum gs_stencil_side side,
|
||||
glStencilFuncSeparate(gl_side, gl_test, 0, 0xFFFFFFFF);
|
||||
if (!gl_success("glStencilFuncSeparate"))
|
||||
blog(LOG_ERROR, "device_stencilfunction (GL) failed");
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_stencilop(device_t device, enum gs_stencil_side side,
|
||||
@ -1033,16 +1058,21 @@ void device_stencilop(device_t device, enum gs_stencil_side side,
|
||||
glStencilOpSeparate(gl_side, gl_fail, gl_zfail, gl_zpass);
|
||||
if (!gl_success("glStencilOpSeparate"))
|
||||
blog(LOG_ERROR, "device_stencilop (GL) failed");
|
||||
|
||||
UNUSED_PARAMETER(device);
|
||||
}
|
||||
|
||||
void device_enable_fullscreen(device_t device, bool enable)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(device);
|
||||
UNUSED_PARAMETER(enable);
|
||||
}
|
||||
|
||||
int device_fullscreen_enabled(device_t device)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(device);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -1050,18 +1080,26 @@ void device_setdisplaymode(device_t device,
|
||||
const struct gs_display_mode *mode)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(device);
|
||||
UNUSED_PARAMETER(mode);
|
||||
}
|
||||
|
||||
void device_getdisplaymode(device_t device,
|
||||
struct gs_display_mode *mode)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(device);
|
||||
UNUSED_PARAMETER(mode);
|
||||
}
|
||||
|
||||
void device_setcolorramp(device_t device, float gamma, float brightness,
|
||||
float contrast)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(device);
|
||||
UNUSED_PARAMETER(gamma);
|
||||
UNUSED_PARAMETER(brightness);
|
||||
UNUSED_PARAMETER(contrast);
|
||||
}
|
||||
|
||||
static inline uint32_t get_target_height(struct gs_device *device)
|
||||
@ -1105,6 +1143,7 @@ void device_getviewport(device_t device, struct gs_rect *rect)
|
||||
|
||||
void device_setscissorrect(device_t device, struct gs_rect *rect)
|
||||
{
|
||||
UNUSED_PARAMETER(device);
|
||||
glScissor(rect->x, rect->y, rect->cx, rect->cy);
|
||||
if (!gl_success("glScissor"))
|
||||
blog(LOG_ERROR, "device_setscissorrect (GL) failed");
|
||||
@ -1194,29 +1233,34 @@ void swapchain_destroy(swapchain_t swapchain)
|
||||
void volumetexture_destroy(texture_t voltex)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(voltex);
|
||||
}
|
||||
|
||||
uint32_t volumetexture_getwidth(texture_t voltex)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(voltex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t volumetexture_getheight(texture_t voltex)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(voltex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t volumetexture_getdepth(texture_t voltex)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(voltex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum gs_color_format volumetexture_getcolorformat(texture_t voltex)
|
||||
{
|
||||
/* TODO */
|
||||
UNUSED_PARAMETER(voltex);
|
||||
return GS_UNKNOWN;
|
||||
}
|
||||
|
||||
|
@ -404,6 +404,8 @@ struct gs_texture_cube {
|
||||
};
|
||||
|
||||
struct gs_stage_surface {
|
||||
device_t device;
|
||||
|
||||
enum gs_color_format format;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
|
@ -164,6 +164,7 @@ void vertexbuffer_flush(vertbuffer_t vb, bool rebuild)
|
||||
goto failed;
|
||||
}
|
||||
|
||||
UNUSED_PARAMETER(rebuild);
|
||||
return;
|
||||
|
||||
failed:
|
||||
@ -202,8 +203,7 @@ static inline GLuint get_vb_buffer(struct gs_vertex_buffer *vb,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool load_vb_buffer(struct gs_shader *shader,
|
||||
struct shader_attrib *attrib,
|
||||
static bool load_vb_buffer(struct shader_attrib *attrib,
|
||||
struct gs_vertex_buffer *vb)
|
||||
{
|
||||
GLenum type;
|
||||
@ -245,7 +245,7 @@ static inline bool load_vb_buffers(struct gs_shader *shader,
|
||||
|
||||
for (i = 0; i < shader->attribs.num; i++) {
|
||||
struct shader_attrib *attrib = shader->attribs.array+i;
|
||||
if (!load_vb_buffer(shader, attrib, vb))
|
||||
if (!load_vb_buffer(attrib, vb))
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -96,6 +96,7 @@ set(libobs_mediaio_SOURCES
|
||||
media-io/format-conversion.c
|
||||
media-io/audio-io.c)
|
||||
set(libobs_mediaio_HEADERS
|
||||
media-io/media-io-defs.h
|
||||
media-io/format-conversion.h
|
||||
media-io/video-io.h
|
||||
media-io/audio-resampler.h
|
||||
|
@ -186,9 +186,9 @@ static inline bool calldata_getsize (calldata_t data, const char *name,
|
||||
}
|
||||
|
||||
static inline bool calldata_getptr (calldata_t data, const char *name,
|
||||
void **ptr)
|
||||
void *p_ptr)
|
||||
{
|
||||
return calldata_getdata(data, name, ptr, sizeof(*ptr));
|
||||
return calldata_getdata(data, name, p_ptr, sizeof(p_ptr));
|
||||
}
|
||||
|
||||
EXPORT bool calldata_getstring(calldata_t data, const char *name,
|
||||
|
@ -63,8 +63,7 @@ struct gs_exports {
|
||||
indexbuffer_t (*device_create_indexbuffer)(device_t device,
|
||||
enum gs_index_type type, void *indices, size_t num,
|
||||
uint32_t flags);
|
||||
enum gs_texture_type (*device_gettexturetype)(device_t device,
|
||||
texture_t texture);
|
||||
enum gs_texture_type (*device_gettexturetype)(texture_t texture);
|
||||
void (*device_load_vertexbuffer)(device_t device,
|
||||
vertbuffer_t vertbuffer);
|
||||
void (*device_load_indexbuffer)(device_t device,
|
||||
@ -157,8 +156,8 @@ struct gs_exports {
|
||||
uint32_t (*stagesurface_getheight)(stagesurf_t stagesurf);
|
||||
enum gs_color_format (*stagesurface_getcolorformat)(
|
||||
stagesurf_t stagesurf);
|
||||
bool (*stagesurface_map)(stagesurf_t stagesurf, const void **data,
|
||||
uint32_t *linesize);
|
||||
bool (*stagesurface_map)(stagesurf_t stagesurf,
|
||||
const uint8_t **data, uint32_t *linesize);
|
||||
void (*stagesurface_unmap)(stagesurf_t stagesurf);
|
||||
|
||||
void (*zstencil_destroy)(zstencil_t zstencil);
|
||||
|
@ -527,6 +527,8 @@ void gs_normal3v(const struct vec3 *v)
|
||||
void gs_color4v(const struct vec4 *v)
|
||||
{
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement gs_color4v")
|
||||
UNUSED_PARAMETER(v);
|
||||
}
|
||||
|
||||
void gs_texcoord2v(const struct vec2 *v, int unit)
|
||||
@ -542,6 +544,7 @@ void gs_texcoord2v(const struct vec2 *v, int unit)
|
||||
input_t gs_getinput(void)
|
||||
{
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement gs_getinput (hmm, not sure about input yet)")
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -628,18 +631,27 @@ shader_t gs_create_pixelshader_from_file(const char *file, char **error_string)
|
||||
texture_t gs_create_texture_from_file(const char *file, uint32_t flags)
|
||||
{
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement gs_create_texture_from_file")
|
||||
UNUSED_PARAMETER(file);
|
||||
UNUSED_PARAMETER(flags);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
texture_t gs_create_cubetexture_from_file(const char *file, uint32_t flags)
|
||||
{
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement gs_create_cubetexture_from_file")
|
||||
UNUSED_PARAMETER(file);
|
||||
UNUSED_PARAMETER(flags);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
texture_t gs_create_volumetexture_from_file(const char *file, uint32_t flags)
|
||||
{
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement gs_create_volumetexture_from_file")
|
||||
UNUSED_PARAMETER(file);
|
||||
UNUSED_PARAMETER(flags);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -667,8 +679,7 @@ static inline void assign_sprite_uv(float *start, float *end, bool flip)
|
||||
}
|
||||
|
||||
static void build_sprite(struct vb_data *data, float fcx, float fcy,
|
||||
float start_u, float end_u, float start_v, float end_v,
|
||||
uint32_t flip)
|
||||
float start_u, float end_u, float start_v, float end_v)
|
||||
{
|
||||
struct vec2 *tvarray = data->tvarray[0].array;
|
||||
|
||||
@ -690,7 +701,7 @@ static inline void build_sprite_norm(struct vb_data *data, float fcx, float fcy,
|
||||
|
||||
assign_sprite_uv(&start_u, &end_u, (flip & GS_FLIP_U) != 0);
|
||||
assign_sprite_uv(&start_v, &end_v, (flip & GS_FLIP_V) != 0);
|
||||
build_sprite(data, fcx, fcy, start_u, end_u, start_v, end_v, flip);
|
||||
build_sprite(data, fcx, fcy, start_u, end_u, start_v, end_v);
|
||||
}
|
||||
|
||||
static inline void build_sprite_rect(struct vb_data *data, texture_t tex,
|
||||
@ -703,7 +714,7 @@ static inline void build_sprite_rect(struct vb_data *data, texture_t tex,
|
||||
|
||||
assign_sprite_rect(&start_u, &end_u, width, (flip & GS_FLIP_U) != 0);
|
||||
assign_sprite_rect(&start_v, &end_v, height, (flip & GS_FLIP_V) != 0);
|
||||
build_sprite(data, fcx, fcy, start_u, end_u, start_v, end_v, flip);
|
||||
build_sprite(data, fcx, fcy, start_u, end_u, start_v, end_v);
|
||||
}
|
||||
|
||||
void gs_draw_sprite(texture_t tex, uint32_t flip, uint32_t width,
|
||||
@ -740,6 +751,14 @@ void gs_draw_cube_backdrop(texture_t cubetex, const struct quat *rot,
|
||||
float left, float right, float top, float bottom, float znear)
|
||||
{
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement gs_draw_cube_backdrop")
|
||||
UNUSED_PARAMETER(cubetex);
|
||||
UNUSED_PARAMETER(rot);
|
||||
UNUSED_PARAMETER(left);
|
||||
UNUSED_PARAMETER(right);
|
||||
UNUSED_PARAMETER(top);
|
||||
UNUSED_PARAMETER(bottom);
|
||||
UNUSED_PARAMETER(znear);
|
||||
}
|
||||
|
||||
void gs_resetviewport(void)
|
||||
@ -760,6 +779,10 @@ void gs_set2dmode(void)
|
||||
void gs_set3dmode(double fovy, double znear, double zvar)
|
||||
{
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement gs_set3dmode")
|
||||
UNUSED_PARAMETER(fovy);
|
||||
UNUSED_PARAMETER(znear);
|
||||
UNUSED_PARAMETER(zvar);
|
||||
}
|
||||
|
||||
void gs_viewport_push(void)
|
||||
@ -817,6 +840,12 @@ void cubetexture_setimage(texture_t cubetex, uint32_t side, const void *data,
|
||||
uint32_t linesize, bool invert)
|
||||
{
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement cubetexture_setimage")
|
||||
UNUSED_PARAMETER(cubetex);
|
||||
UNUSED_PARAMETER(side);
|
||||
UNUSED_PARAMETER(data);
|
||||
UNUSED_PARAMETER(linesize);
|
||||
UNUSED_PARAMETER(invert);
|
||||
}
|
||||
|
||||
void gs_perspective(float angle, float aspect, float near, float far)
|
||||
@ -999,8 +1028,7 @@ indexbuffer_t gs_create_indexbuffer(enum gs_index_type type,
|
||||
enum gs_texture_type gs_gettexturetype(texture_t texture)
|
||||
{
|
||||
graphics_t graphics = thread_graphics;
|
||||
return graphics->exports.device_gettexturetype(graphics->device,
|
||||
texture);
|
||||
return graphics->exports.device_gettexturetype(texture);
|
||||
}
|
||||
|
||||
void gs_load_vertexbuffer(vertbuffer_t vertbuffer)
|
||||
@ -1526,7 +1554,7 @@ enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf)
|
||||
return graphics->exports.stagesurface_getcolorformat(stagesurf);
|
||||
}
|
||||
|
||||
bool stagesurface_map(stagesurf_t stagesurf, const void **data,
|
||||
bool stagesurface_map(stagesurf_t stagesurf, const uint8_t **data,
|
||||
uint32_t *linesize)
|
||||
{
|
||||
graphics_t graphics = thread_graphics;
|
||||
@ -1610,5 +1638,3 @@ bool texture_rebind_iosurface(texture_t texture, void *iosurf)
|
||||
|
||||
return graphics->exports.texture_rebind_iosurface(texture, iosurf);
|
||||
}
|
||||
|
||||
|
||||
|
@ -659,7 +659,7 @@ EXPORT void stagesurface_destroy(stagesurf_t stagesurf);
|
||||
EXPORT uint32_t stagesurface_getwidth(stagesurf_t stagesurf);
|
||||
EXPORT uint32_t stagesurface_getheight(stagesurf_t stagesurf);
|
||||
EXPORT enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf);
|
||||
EXPORT bool stagesurface_map(stagesurf_t stagesurf, const void **data,
|
||||
EXPORT bool stagesurface_map(stagesurf_t stagesurf, const uint8_t **data,
|
||||
uint32_t *linesize);
|
||||
EXPORT void stagesurface_unmap(stagesurf_t stagesurf);
|
||||
|
||||
|
@ -39,9 +39,9 @@ struct audio_line {
|
||||
char *name;
|
||||
|
||||
struct audio_output *audio;
|
||||
struct circlebuf buffers[MAX_AUDIO_PLANES];
|
||||
struct circlebuf buffers[MAX_AV_PLANES];
|
||||
pthread_mutex_t mutex;
|
||||
DARRAY(uint8_t) volume_buffers[MAX_AUDIO_PLANES];
|
||||
DARRAY(uint8_t) volume_buffers[MAX_AV_PLANES];
|
||||
uint64_t base_timestamp;
|
||||
uint64_t last_timestamp;
|
||||
|
||||
@ -55,7 +55,7 @@ struct audio_line {
|
||||
|
||||
static inline void audio_line_destroy_data(struct audio_line *line)
|
||||
{
|
||||
for (size_t i = 0; i < MAX_AUDIO_PLANES; i++) {
|
||||
for (size_t i = 0; i < MAX_AV_PLANES; i++) {
|
||||
circlebuf_free(&line->buffers[i]);
|
||||
da_free(line->volume_buffers[i]);
|
||||
}
|
||||
@ -74,7 +74,7 @@ struct audio_output {
|
||||
pthread_t thread;
|
||||
event_t stop_event;
|
||||
|
||||
DARRAY(uint8_t) mix_buffers[MAX_AUDIO_PLANES];
|
||||
DARRAY(uint8_t) mix_buffers[MAX_AV_PLANES];
|
||||
|
||||
bool initialized;
|
||||
|
||||
@ -196,7 +196,7 @@ static inline void do_audio_output(struct audio_output *audio,
|
||||
uint64_t timestamp, uint32_t frames)
|
||||
{
|
||||
struct audio_data data;
|
||||
for (size_t i = 0; i < MAX_AUDIO_PLANES; i++)
|
||||
for (size_t i = 0; i < MAX_AV_PLANES; i++)
|
||||
data.data[i] = audio->mix_buffers[i].array;
|
||||
data.frames = frames;
|
||||
data.timestamp = timestamp;
|
||||
@ -415,7 +415,7 @@ void audio_output_close(audio_t audio)
|
||||
line = next;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < MAX_AUDIO_PLANES; i++)
|
||||
for (size_t i = 0; i < MAX_AV_PLANES; i++)
|
||||
da_free(audio->mix_buffers[i]);
|
||||
|
||||
event_destroy(&audio->stop_event);
|
||||
|
@ -17,6 +17,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "media-io-defs.h"
|
||||
#include "../util/c99defs.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
@ -28,8 +29,6 @@ extern "C" {
|
||||
* for the media.
|
||||
*/
|
||||
|
||||
#define MAX_AUDIO_PLANES 8
|
||||
|
||||
struct audio_output;
|
||||
struct audio_line;
|
||||
typedef struct audio_output *audio_t;
|
||||
@ -64,7 +63,7 @@ enum speaker_layout {
|
||||
};
|
||||
|
||||
struct audio_data {
|
||||
const uint8_t *data[MAX_AUDIO_PLANES];
|
||||
const uint8_t *data[MAX_AV_PLANES];
|
||||
uint32_t frames;
|
||||
uint64_t timestamp;
|
||||
float volume;
|
||||
|
@ -30,7 +30,7 @@ struct audio_resampler {
|
||||
uint64_t input_layout;
|
||||
enum AVSampleFormat input_format;
|
||||
|
||||
uint8_t *output_buffer[MAX_AUDIO_PLANES];
|
||||
uint8_t *output_buffer[MAX_AV_PLANES];
|
||||
uint64_t output_layout;
|
||||
enum AVSampleFormat output_format;
|
||||
int output_size;
|
||||
|
@ -19,74 +19,74 @@
|
||||
#include <xmmintrin.h>
|
||||
#include <emmintrin.h>
|
||||
|
||||
/* ...surprisingly, if I don't use a macro to force inlining, it causes the
|
||||
* CPU usage to boost by a tremendous amount in debug builds. */
|
||||
|
||||
#define get_m128_32_0(val) (*((uint32_t*)&val))
|
||||
#define get_m128_32_1(val) (*(((uint32_t*)&val)+1))
|
||||
|
||||
static FORCE_INLINE void pack_lum(uint8_t *lum_plane,
|
||||
uint32_t lum_pos0, uint32_t lum_pos1,
|
||||
const __m128i line1, const __m128i line2,
|
||||
const __m128i lum_mask)
|
||||
#define pack_lum(lum_plane, lum_pos0, lum_pos1, line1, line2, lum_mask) \
|
||||
do { \
|
||||
__m128i pack_val = _mm_packs_epi32( \
|
||||
_mm_srli_si128(_mm_and_si128(line1, lum_mask), 1), \
|
||||
_mm_srli_si128(_mm_and_si128(line2, lum_mask), 1)); \
|
||||
pack_val = _mm_packus_epi16(pack_val, pack_val); \
|
||||
\
|
||||
*(uint32_t*)(lum_plane+lum_pos0) = get_m128_32_0(pack_val); \
|
||||
*(uint32_t*)(lum_plane+lum_pos1) = get_m128_32_1(pack_val); \
|
||||
} while (false)
|
||||
|
||||
#define pack_ch_1plane(uv_plane, chroma_pos, line1, line2, uv_mask) \
|
||||
do { \
|
||||
__m128i add_val = _mm_add_epi64( \
|
||||
_mm_and_si128(line1, uv_mask), \
|
||||
_mm_and_si128(line2, uv_mask)); \
|
||||
__m128i avg_val = _mm_add_epi64( \
|
||||
add_val, \
|
||||
_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1))); \
|
||||
avg_val = _mm_srai_epi16(avg_val, 2); \
|
||||
avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
|
||||
avg_val = _mm_packus_epi16(avg_val, avg_val); \
|
||||
\
|
||||
*(uint32_t*)(uv_plane+chroma_pos) = get_m128_32_0(avg_val); \
|
||||
} while (false)
|
||||
|
||||
#define pack_ch_2plane(u_plane, v_plane, chroma_pos, line1, line2, uv_mask) \
|
||||
do { \
|
||||
uint32_t packed_vals; \
|
||||
\
|
||||
__m128i add_val = _mm_add_epi64( \
|
||||
_mm_and_si128(line1, uv_mask), \
|
||||
_mm_and_si128(line2, uv_mask)); \
|
||||
__m128i avg_val = _mm_add_epi64( \
|
||||
add_val, \
|
||||
_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1))); \
|
||||
avg_val = _mm_srai_epi16(avg_val, 2); \
|
||||
avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
|
||||
avg_val = _mm_shufflelo_epi16(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
|
||||
avg_val = _mm_packus_epi16(avg_val, avg_val); \
|
||||
\
|
||||
packed_vals = get_m128_32_0(avg_val); \
|
||||
\
|
||||
*(uint16_t*)(u_plane+chroma_pos) = (uint16_t)(packed_vals); \
|
||||
*(uint16_t*)(v_plane+chroma_pos) = (uint16_t)(packed_vals>>16); \
|
||||
} while (false)
|
||||
|
||||
|
||||
static FORCE_INLINE uint32_t min_uint32(uint32_t a, uint32_t b)
|
||||
{
|
||||
__m128i pack_val = _mm_packs_epi32(
|
||||
_mm_srli_si128(_mm_and_si128(line1, lum_mask), 1),
|
||||
_mm_srli_si128(_mm_and_si128(line2, lum_mask), 1));
|
||||
pack_val = _mm_packus_epi16(pack_val, pack_val);
|
||||
|
||||
*(uint32_t*)(lum_plane+lum_pos0) = get_m128_32_0(pack_val);
|
||||
*(uint32_t*)(lum_plane+lum_pos1) = get_m128_32_1(pack_val);
|
||||
}
|
||||
|
||||
static FORCE_INLINE void pack_chroma_1plane(uint8_t *uv_plane,
|
||||
uint32_t chroma_pos,
|
||||
const __m128i line1, const __m128i line2,
|
||||
const __m128i uv_mask)
|
||||
{
|
||||
__m128i add_val = _mm_add_epi64(
|
||||
_mm_and_si128(line1, uv_mask),
|
||||
_mm_and_si128(line2, uv_mask));
|
||||
__m128i avg_val = _mm_add_epi64(
|
||||
add_val,
|
||||
_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1)));
|
||||
avg_val = _mm_srai_epi16(avg_val, 2);
|
||||
avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0));
|
||||
avg_val = _mm_packus_epi16(avg_val, avg_val);
|
||||
|
||||
*(uint32_t*)(uv_plane+chroma_pos) = get_m128_32_0(avg_val);
|
||||
}
|
||||
|
||||
static FORCE_INLINE void pack_chroma_2plane(uint8_t *u_plane, uint8_t *v_plane,
|
||||
uint32_t chroma_pos,
|
||||
const __m128i line1, const __m128i line2,
|
||||
const __m128i uv_mask)
|
||||
{
|
||||
uint32_t packed_vals;
|
||||
|
||||
__m128i add_val = _mm_add_epi64(
|
||||
_mm_and_si128(line1, uv_mask),
|
||||
_mm_and_si128(line2, uv_mask));
|
||||
__m128i avg_val = _mm_add_epi64(
|
||||
add_val,
|
||||
_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1)));
|
||||
avg_val = _mm_srai_epi16(avg_val, 2);
|
||||
avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0));
|
||||
avg_val = _mm_shufflelo_epi16(avg_val, _MM_SHUFFLE(3, 1, 2, 0));
|
||||
avg_val = _mm_packus_epi16(avg_val, avg_val);
|
||||
|
||||
packed_vals = get_m128_32_0(avg_val);
|
||||
|
||||
*(uint16_t*)(u_plane+chroma_pos) = (uint16_t)(packed_vals);
|
||||
*(uint16_t*)(v_plane+chroma_pos) = (uint16_t)(packed_vals>>16);
|
||||
return a < b ? a : b;
|
||||
}
|
||||
|
||||
void compress_uyvx_to_i420(
|
||||
const uint8_t *input, uint32_t in_linesize,
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output[], const uint32_t out_linesize[])
|
||||
{
|
||||
uint8_t *lum_plane = output[0];
|
||||
uint8_t *u_plane = output[1];
|
||||
uint8_t *v_plane = output[2];
|
||||
uint32_t width = min_uint32(in_linesize, out_linesize[0]);
|
||||
uint32_t y;
|
||||
|
||||
__m128i lum_mask = _mm_set1_epi32(0x0000FF00);
|
||||
@ -109,7 +109,7 @@ void compress_uyvx_to_i420(
|
||||
|
||||
pack_lum(lum_plane, lum_pos0, lum_pos1,
|
||||
line1, line2, lum_mask);
|
||||
pack_chroma_2plane(u_plane, v_plane,
|
||||
pack_ch_2plane(u_plane, v_plane,
|
||||
chroma_y_pos + (x>>1),
|
||||
line1, line2, uv_mask);
|
||||
}
|
||||
@ -118,12 +118,12 @@ void compress_uyvx_to_i420(
|
||||
|
||||
void compress_uyvx_to_nv12(
|
||||
const uint8_t *input, uint32_t in_linesize,
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output[], const uint32_t out_linesize[])
|
||||
{
|
||||
uint8_t *lum_plane = output[0];
|
||||
uint8_t *chroma_plane = output[1];
|
||||
uint32_t width = min_uint32(in_linesize, out_linesize[0]);
|
||||
uint32_t y;
|
||||
|
||||
__m128i lum_mask = _mm_set1_epi32(0x0000FF00);
|
||||
@ -146,7 +146,7 @@ void compress_uyvx_to_nv12(
|
||||
|
||||
pack_lum(lum_plane, lum_pos0, lum_pos1,
|
||||
line1, line2, lum_mask);
|
||||
pack_chroma_1plane(chroma_plane, chroma_y_pos + x,
|
||||
pack_ch_1plane(chroma_plane, chroma_y_pos + x,
|
||||
line1, line2, uv_mask);
|
||||
}
|
||||
}
|
||||
@ -154,12 +154,11 @@ void compress_uyvx_to_nv12(
|
||||
|
||||
void decompress_420(
|
||||
const uint8_t *const input[], const uint32_t in_linesize[],
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output, uint32_t out_linesize)
|
||||
{
|
||||
uint32_t start_y_d2 = start_y/2;
|
||||
uint32_t width_d2 = width/2;
|
||||
uint32_t width_d2 = min_uint32(in_linesize[0], out_linesize)/2;
|
||||
uint32_t height_d2 = end_y/2;
|
||||
uint32_t y;
|
||||
|
||||
@ -170,8 +169,8 @@ void decompress_420(
|
||||
register uint32_t *output0, *output1;
|
||||
uint32_t x;
|
||||
|
||||
lum0 = input[0] + y * 2*width;
|
||||
lum1 = lum0 + width;
|
||||
lum0 = input[0] + y * 2 * in_linesize[0];
|
||||
lum1 = lum0 + in_linesize[0];
|
||||
output0 = (uint32_t*)(output + y * 2 * in_linesize[0]);
|
||||
output1 = (uint32_t*)((uint8_t*)output0 + in_linesize[0]);
|
||||
|
||||
@ -190,12 +189,11 @@ void decompress_420(
|
||||
|
||||
void decompress_nv12(
|
||||
const uint8_t *const input[], const uint32_t in_linesize[],
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output, uint32_t out_linesize)
|
||||
{
|
||||
uint32_t start_y_d2 = start_y/2;
|
||||
uint32_t width_d2 = width/2;
|
||||
uint32_t width_d2 = min_uint32(in_linesize[0], out_linesize)/2;
|
||||
uint32_t height_d2 = end_y/2;
|
||||
uint32_t y;
|
||||
|
||||
@ -206,9 +204,9 @@ void decompress_nv12(
|
||||
uint32_t x;
|
||||
|
||||
chroma = (const uint16_t*)(input[1] + y * in_linesize[1]);
|
||||
lum0 = input[0] + y*2 * in_linesize[0];
|
||||
lum0 = input[0] + y * 2 * in_linesize[0];
|
||||
lum1 = lum0 + in_linesize[0];
|
||||
output0 = (uint32_t*)(output + y*2 * out_linesize);
|
||||
output0 = (uint32_t*)(output + y * 2 * out_linesize);
|
||||
output1 = (uint32_t*)((uint8_t*)output0 + out_linesize);
|
||||
|
||||
for (x = 0; x < width_d2; x++) {
|
||||
@ -225,12 +223,11 @@ void decompress_nv12(
|
||||
|
||||
void decompress_422(
|
||||
const uint8_t *input, uint32_t in_linesize,
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output, uint32_t out_linesize,
|
||||
bool leading_lum)
|
||||
{
|
||||
uint32_t width_d2 = width >> 1;
|
||||
uint32_t width_d2 = min_uint32(in_linesize, out_linesize)/2;
|
||||
uint32_t y;
|
||||
|
||||
register const uint32_t *input32;
|
||||
|
@ -29,31 +29,26 @@ extern "C" {
|
||||
|
||||
EXPORT void compress_uyvx_to_i420(
|
||||
const uint8_t *input, uint32_t in_linesize,
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output[], const uint32_t out_linesize[]);
|
||||
|
||||
EXPORT void compress_uyvx_to_nv12(
|
||||
const uint8_t *input, uint32_t in_linesize,
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output[], const uint32_t out_linesize[]);
|
||||
|
||||
EXPORT void decompress_nv12(
|
||||
const uint8_t *const input[], const uint32_t in_linesize[],
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output, uint32_t out_linesize);
|
||||
|
||||
EXPORT void decompress_420(
|
||||
const uint8_t *const input[], const uint32_t in_linesize[],
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output, uint32_t out_linesize);
|
||||
|
||||
EXPORT void decompress_422(
|
||||
const uint8_t *input, uint32_t in_linesize,
|
||||
uint32_t width, uint32_t height,
|
||||
uint32_t start_y, uint32_t end_y,
|
||||
uint8_t *output, uint32_t out_linesize,
|
||||
bool leading_lum);
|
||||
|
20
libobs/media-io/media-io-defs.h
Normal file
20
libobs/media-io/media-io-defs.h
Normal file
@ -0,0 +1,20 @@
|
||||
/******************************************************************************
|
||||
Copyright (C) 2014 by Hugh Bailey <obs.jim@gmail.com>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
******************************************************************************/
|
||||
|
||||
#pragma once
|
||||
|
||||
#define MAX_AV_PLANES 8
|
@ -63,9 +63,6 @@ static inline void video_swapframes(struct video_output *video)
|
||||
|
||||
static inline void video_output_cur_frame(struct video_output *video)
|
||||
{
|
||||
size_t width = video->info.width;
|
||||
size_t height = video->info.height;
|
||||
|
||||
if (!video->cur_frame.data[0])
|
||||
return;
|
||||
|
||||
|
@ -17,6 +17,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "media-io-defs.h"
|
||||
#include "../util/c99defs.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
@ -25,8 +26,6 @@ extern "C" {
|
||||
|
||||
/* Base video output component. Use this to create an video output track. */
|
||||
|
||||
#define MAX_VIDEO_PLANES 8
|
||||
|
||||
struct video_output;
|
||||
typedef struct video_output *video_t;
|
||||
|
||||
@ -49,8 +48,8 @@ enum video_format {
|
||||
};
|
||||
|
||||
struct video_frame {
|
||||
const uint8_t *data[MAX_VIDEO_PLANES];
|
||||
uint32_t linesize[MAX_VIDEO_PLANES];
|
||||
const uint8_t *data[MAX_AV_PLANES];
|
||||
uint32_t linesize[MAX_AV_PLANES];
|
||||
uint64_t timestamp;
|
||||
};
|
||||
|
||||
|
@ -237,6 +237,8 @@ obs_data_t obs_data_create()
|
||||
obs_data_t obs_data_create_from_json(const char *json_string)
|
||||
{
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement obs_data_create_from_json")
|
||||
UNUSED_PARAMETER(json_string);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -143,7 +143,7 @@ static inline void render_display_begin(struct obs_display *display)
|
||||
gs_setviewport(0, 0, display->cx, display->cy);
|
||||
}
|
||||
|
||||
static inline void render_display_end(struct obs_display *display)
|
||||
static inline void render_display_end()
|
||||
{
|
||||
gs_endscene();
|
||||
gs_present();
|
||||
@ -166,5 +166,5 @@ void render_display(struct obs_display *display)
|
||||
|
||||
pthread_mutex_unlock(&display->draw_callbacks_mutex);
|
||||
|
||||
render_display_end(display);
|
||||
render_display_end();
|
||||
}
|
||||
|
@ -69,6 +69,8 @@ obs_encoder_t obs_encoder_create(const char *id, const char *name,
|
||||
pthread_mutex_lock(&obs->data.encoders_mutex);
|
||||
da_push_back(obs->data.encoders, &encoder);
|
||||
pthread_mutex_unlock(&obs->data.encoders_mutex);
|
||||
|
||||
encoder->name = bstrdup(name);
|
||||
return encoder;
|
||||
}
|
||||
|
||||
@ -81,6 +83,7 @@ void obs_encoder_destroy(obs_encoder_t encoder)
|
||||
|
||||
encoder->info.destroy(encoder->data);
|
||||
obs_data_release(encoder->settings);
|
||||
bfree(encoder->name);
|
||||
bfree(encoder);
|
||||
}
|
||||
}
|
||||
@ -88,53 +91,73 @@ void obs_encoder_destroy(obs_encoder_t encoder)
|
||||
obs_properties_t obs_encoder_properties(const char *id, const char *locale)
|
||||
{
|
||||
const struct obs_encoder_info *ei = get_encoder_info(id);
|
||||
if (ei && ei->properties)
|
||||
return ei->properties(locale);
|
||||
if (ei && ei->get_properties)
|
||||
return ei->get_properties(locale);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void obs_encoder_update(obs_encoder_t encoder, obs_data_t settings)
|
||||
{
|
||||
if (!encoder) return;
|
||||
|
||||
obs_data_replace(&encoder->settings, settings);
|
||||
encoder->info.update(encoder->data, encoder->settings);
|
||||
}
|
||||
|
||||
bool obs_encoder_reset(obs_encoder_t encoder)
|
||||
bool obs_encoder_reset(obs_encoder_t encoder, obs_data_t settings)
|
||||
{
|
||||
return encoder->info.reset(encoder->data);
|
||||
if (!encoder) return false;
|
||||
|
||||
return encoder->info.reset(encoder->data, settings);
|
||||
}
|
||||
|
||||
bool obs_encoder_encode(obs_encoder_t encoder, void *frames, size_t size)
|
||||
bool obs_encoder_encode(obs_encoder_t encoder,
|
||||
const struct encoder_frame *frame,
|
||||
struct encoder_packet *packet, bool *received_packet)
|
||||
{
|
||||
/* TODO */
|
||||
//encoder->info.encode(encoder->data, frames, size, packets);
|
||||
return false;
|
||||
if (!encoder) return false;
|
||||
|
||||
return encoder->info.encode(encoder->data, frame, packet,
|
||||
received_packet);
|
||||
}
|
||||
|
||||
int obs_encoder_getheader(obs_encoder_t encoder,
|
||||
struct encoder_packet **packets)
|
||||
bool obs_encoder_get_extra_data(obs_encoder_t encoder, uint8_t **extra_data,
|
||||
size_t *size)
|
||||
{
|
||||
return encoder->info.getheader(encoder, packets);
|
||||
}
|
||||
if (!encoder) return false;
|
||||
|
||||
bool obs_encoder_setbitrate(obs_encoder_t encoder, uint32_t bitrate,
|
||||
uint32_t buffersize)
|
||||
{
|
||||
if (encoder->info.setbitrate)
|
||||
return encoder->info.setbitrate(encoder->data, bitrate,
|
||||
buffersize);
|
||||
return false;
|
||||
}
|
||||
if (encoder->info.get_extra_data)
|
||||
return encoder->info.get_extra_data(encoder, extra_data, size);
|
||||
|
||||
bool obs_encoder_request_keyframe(obs_encoder_t encoder)
|
||||
{
|
||||
if (encoder->info.request_keyframe)
|
||||
return encoder->info.request_keyframe(encoder->data);
|
||||
return false;
|
||||
}
|
||||
|
||||
obs_data_t obs_encoder_get_settings(obs_encoder_t encoder)
|
||||
{
|
||||
if (!encoder) return NULL;
|
||||
|
||||
obs_data_addref(encoder->settings);
|
||||
return encoder->settings;
|
||||
}
|
||||
|
||||
bool obs_encoder_start(obs_encoder_t encoder,
|
||||
void (*new_packet)(void *param, struct encoder_packet *packet),
|
||||
void *param)
|
||||
{
|
||||
#pragma message ("TODO: implement obs_encoder_start")
|
||||
UNUSED_PARAMETER(encoder);
|
||||
UNUSED_PARAMETER(new_packet);
|
||||
UNUSED_PARAMETER(param);
|
||||
return false;
|
||||
}
|
||||
|
||||
void obs_encoder_stop(obs_encoder_t encoder,
|
||||
void (*new_packet)(void *param, struct encoder_packet *packet),
|
||||
void *param)
|
||||
{
|
||||
#pragma message ("TODO: implement obs_encoder_stop")
|
||||
UNUSED_PARAMETER(encoder);
|
||||
UNUSED_PARAMETER(new_packet);
|
||||
UNUSED_PARAMETER(param);
|
||||
return;
|
||||
}
|
||||
|
@ -17,27 +17,153 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
struct obs_encoder_info {
|
||||
const char *id;
|
||||
|
||||
const char *(*getname)(const char *locale);
|
||||
|
||||
void *(*create)(obs_data_t settings, obs_encoder_t encoder);
|
||||
void (*destroy)(void *data);
|
||||
|
||||
bool (*reset)(void *data);
|
||||
|
||||
int (*encode)(void *data, void *frames, size_t size,
|
||||
struct encoder_packet **packets);
|
||||
int (*getheader)(void *data, struct encoder_packet **packets);
|
||||
|
||||
/* optional */
|
||||
void (*update)(void *data, obs_data_t settings);
|
||||
|
||||
obs_properties_t (*properties)(const char *locale);
|
||||
|
||||
bool (*setbitrate)(void *data, uint32_t bitrate, uint32_t buffersize);
|
||||
bool (*request_keyframe)(void *data);
|
||||
/** Specifies the encoder type */
|
||||
enum obs_encoder_type {
|
||||
OBS_PACKET_AUDIO,
|
||||
OBS_PACKET_VIDEO
|
||||
};
|
||||
|
||||
/** Encoder output packet */
|
||||
struct encoder_packet {
|
||||
uint8_t *data; /**< Packet data */
|
||||
size_t size; /**< Packet size */
|
||||
|
||||
int64_t pts; /**< Presentation timestamp */
|
||||
int64_t dts; /**< Decode timestamp */
|
||||
|
||||
enum obs_encoder_type type; /**< Encoder type */
|
||||
|
||||
/**
|
||||
* Packet priority
|
||||
*
|
||||
* This is generally use by video encoders to specify the priority
|
||||
* of the packet. If this frame is dropped, it will have to wait for
|
||||
* another packet of drop_priority.
|
||||
*/
|
||||
int priority;
|
||||
|
||||
/**
|
||||
* Dropped packet priority
|
||||
*
|
||||
* If this packet is dropped, the next packet must be of this priority
|
||||
* or higher to continue transmission.
|
||||
*/
|
||||
int drop_priority;
|
||||
};
|
||||
|
||||
/** Encoder input frame */
|
||||
struct encoder_frame {
|
||||
/** Data for the frame/audio */
|
||||
uint8_t *data[MAX_AV_PLANES];
|
||||
|
||||
/** size of each plane */
|
||||
uint32_t linesize[MAX_AV_PLANES];
|
||||
|
||||
/** Number of frames (audio only) */
|
||||
uint32_t frames;
|
||||
|
||||
/** Presentation timestamp */
|
||||
int64_t pts;
|
||||
};
|
||||
|
||||
/**
|
||||
* Encoder interface
|
||||
*
|
||||
* Encoders have a limited usage with OBS. You are not generally supposed to
|
||||
* implement every encoder out there. Generally, these are limited or specific
|
||||
* encoders for h264/aac for streaming and recording. It doesn't have to be
|
||||
* *just* h264 or aac of course, but generally those are the expected encoders.
|
||||
*
|
||||
* That being said, other encoders will be kept in mind for future use.
|
||||
*/
|
||||
struct obs_encoder_info {
|
||||
/* ----------------------------------------------------------------- */
|
||||
/* Required implementation*/
|
||||
|
||||
/** Specifies the named identifier of this encoder */
|
||||
const char *id;
|
||||
|
||||
/**
|
||||
* Gets the full translated name of this encoder
|
||||
*
|
||||
* @param locale Locale to use for translation
|
||||
* @return Translated name of the encoder
|
||||
*/
|
||||
const char *(*getname)(const char *locale);
|
||||
|
||||
/**
|
||||
* Creates the encoder with the specified settings
|
||||
*
|
||||
* @param settings Settings for the encoder
|
||||
* @param encoder OBS encoder context
|
||||
* @return Data associated with this encoder context
|
||||
*/
|
||||
void *(*create)(obs_data_t settings, obs_encoder_t encoder);
|
||||
|
||||
/**
|
||||
* Destroys the encoder data
|
||||
*
|
||||
* @param data Data associated with this encoder context
|
||||
*/
|
||||
void (*destroy)(void *data);
|
||||
|
||||
/**
|
||||
* Resets the encoder with the specified settings
|
||||
*
|
||||
* @param data Data associated with this encoder context
|
||||
* @param settings New settings for the encoder
|
||||
* @return true if successful, false otherwise
|
||||
*/
|
||||
bool (*reset)(void *data, obs_data_t settings);
|
||||
|
||||
/**
|
||||
* Encodes frame(s), and outputs encoded packets as they become
|
||||
* available.
|
||||
*
|
||||
* @param data Data associated with this encoder
|
||||
* context
|
||||
* @param[in] frame Raw audio/video data to encode
|
||||
* @param[out] packet Encoder packet output, if any
|
||||
* @param[out] received_packet Set to true if a packet was received,
|
||||
* false otherwise
|
||||
* @return true if successful, false otherwise.
|
||||
*/
|
||||
int (*encode)(void *data, const struct encoder_frame *frame,
|
||||
struct encoder_packet *packet, bool *received_packet);
|
||||
|
||||
/* ----------------------------------------------------------------- */
|
||||
/* Optional implementation */
|
||||
|
||||
/**
|
||||
* Gets the property information of this encoder
|
||||
*
|
||||
* @param locale The locale to translate with
|
||||
* @return The properties data
|
||||
*/
|
||||
obs_properties_t (*get_properties)(const char *locale);
|
||||
|
||||
/**
|
||||
* Updates the settings for this encoder
|
||||
*
|
||||
* @param data Data associated with this encoder context
|
||||
* @param settings New settings for this encoder
|
||||
*/
|
||||
void (*update)(void *data, obs_data_t settings);
|
||||
|
||||
/**
|
||||
* Returns extra data associated with this encoder (usually header)
|
||||
*
|
||||
* @param data Data associated with this encoder context
|
||||
* @param extra_data Pointer to receive the extra data
|
||||
* @param size Pointer to receive the size of the extra data
|
||||
*/
|
||||
bool (*get_extra_data)(void *data, uint8_t **extra_data, size_t *size);
|
||||
};
|
||||
|
||||
/**
|
||||
* Register an encoder definition to the current obs context. This should be
|
||||
* used in obs_module_load.
|
||||
*
|
||||
* @param info Pointer to the source definition structure.
|
||||
*/
|
||||
EXPORT void obs_register_encoder(const struct obs_encoder_info *info);
|
||||
|
@ -202,7 +202,6 @@ void obs_register_encoder(const struct obs_encoder_info *info)
|
||||
CHECK_REQUIRED_VAL(info, destroy, obs_register_encoder);
|
||||
CHECK_REQUIRED_VAL(info, reset, obs_register_encoder);
|
||||
CHECK_REQUIRED_VAL(info, encode, obs_register_encoder);
|
||||
CHECK_REQUIRED_VAL(info, getheader, obs_register_encoder);
|
||||
|
||||
REGISTER_OBS_DEF(cur_encoder_info_size, obs_encoder_info,
|
||||
obs->encoder_types, info);
|
||||
@ -210,12 +209,9 @@ void obs_register_encoder(const struct obs_encoder_info *info)
|
||||
|
||||
void obs_register_service(const struct obs_service_info *info)
|
||||
{
|
||||
CHECK_REQUIRED_VAL(info, getname, obs_register_service);
|
||||
CHECK_REQUIRED_VAL(info, create, obs_register_service);
|
||||
CHECK_REQUIRED_VAL(info, destroy, obs_register_service);
|
||||
|
||||
REGISTER_OBS_DEF(cur_service_info_size, obs_service_info,
|
||||
obs->service_types, info);
|
||||
/* TODO */
|
||||
#pragma message ("TODO: implement obs_register_service")
|
||||
UNUSED_PARAMETER(info);
|
||||
}
|
||||
|
||||
void obs_regsiter_modal_ui(const struct obs_modal_ui *info)
|
||||
|
@ -31,8 +31,8 @@ static inline void signal_item_remove(struct obs_scene_item *item)
|
||||
|
||||
static const char *scene_getname(const char *locale)
|
||||
{
|
||||
/* TODO: locale lookup of display name */
|
||||
return "Scene";
|
||||
UNUSED_PARAMETER(locale);
|
||||
return "Scene internal source type";
|
||||
}
|
||||
|
||||
static void *scene_create(obs_data_t settings, struct obs_source *source)
|
||||
@ -51,6 +51,7 @@ static void *scene_create(obs_data_t settings, struct obs_source *source)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
UNUSED_PARAMETER(settings);
|
||||
return scene;
|
||||
|
||||
fail:
|
||||
@ -142,11 +143,20 @@ static void scene_video_render(void *data, effect_t effect)
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&scene->mutex);
|
||||
|
||||
UNUSED_PARAMETER(effect);
|
||||
}
|
||||
|
||||
static uint32_t scene_getsize(void *data)
|
||||
static uint32_t scene_getwidth(void *data)
|
||||
{
|
||||
return 0;
|
||||
UNUSED_PARAMETER(data);
|
||||
return obs->video.base_width;
|
||||
}
|
||||
|
||||
static uint32_t scene_getheight(void *data)
|
||||
{
|
||||
UNUSED_PARAMETER(data);
|
||||
return obs->video.base_height;
|
||||
}
|
||||
|
||||
static const struct obs_source_info scene_info =
|
||||
@ -158,8 +168,8 @@ static const struct obs_source_info scene_info =
|
||||
.create = scene_create,
|
||||
.destroy = scene_destroy,
|
||||
.video_render = scene_video_render,
|
||||
.getwidth = scene_getsize,
|
||||
.getheight = scene_getsize,
|
||||
.getwidth = scene_getwidth,
|
||||
.getheight = scene_getheight,
|
||||
};
|
||||
|
||||
obs_scene_t obs_scene_create(const char *name)
|
||||
|
@ -22,7 +22,7 @@ struct obs_service_info {
|
||||
char *id;
|
||||
|
||||
const char *(*getname)(const char *locale);
|
||||
|
||||
#if 0
|
||||
void *(*create)(obs_data_t settings, struct service_data *service);
|
||||
void (*destroy)(void *data);
|
||||
|
||||
@ -32,4 +32,5 @@ struct obs_service_info {
|
||||
/* get stream url/key */
|
||||
/* get (viewers/etc) */
|
||||
/* send (current game/title/activate commercial/etc) */
|
||||
#endif
|
||||
};
|
||||
|
@ -172,7 +172,7 @@ void source_frame_init(struct source_frame *frame,
|
||||
enum video_format format, uint32_t width, uint32_t height)
|
||||
{
|
||||
size_t size;
|
||||
size_t offsets[MAX_VIDEO_PLANES];
|
||||
size_t offsets[MAX_AV_PLANES];
|
||||
int alignment = base_get_alignment();
|
||||
|
||||
memset(offsets, 0, sizeof(offsets));
|
||||
@ -255,7 +255,7 @@ static void obs_source_destroy(obs_source_t source)
|
||||
if (source->data)
|
||||
source->info.destroy(source->data);
|
||||
|
||||
for (i = 0; i < MAX_AUDIO_PLANES; i++)
|
||||
for (i = 0; i < MAX_AV_PLANES; i++)
|
||||
bfree(source->audio_data.data[i]);
|
||||
|
||||
audio_line_destroy(source->audio_line);
|
||||
@ -366,7 +366,7 @@ void obs_source_video_tick(obs_source_t source, float seconds)
|
||||
}
|
||||
|
||||
/* unless the value is 3+ hours worth of frames, this won't overflow */
|
||||
static inline uint64_t conv_frames_to_time(obs_source_t source, size_t frames)
|
||||
static inline uint64_t conv_frames_to_time(size_t frames)
|
||||
{
|
||||
const struct audio_output_info *info;
|
||||
info = audio_output_getinfo(obs->audio.audio);
|
||||
@ -423,7 +423,7 @@ static void source_output_audio_line(obs_source_t source,
|
||||
}
|
||||
|
||||
source->next_audio_ts_min = in.timestamp +
|
||||
conv_frames_to_time(source, in.frames);
|
||||
conv_frames_to_time(in.frames);
|
||||
|
||||
if (source->audio_reset_ref != 0)
|
||||
return;
|
||||
@ -499,24 +499,22 @@ static bool upload_frame(texture_t tex, const struct source_frame *frame)
|
||||
return false;
|
||||
|
||||
if (type == CONVERT_420)
|
||||
decompress_420(frame->data, frame->linesize,
|
||||
frame->width, frame->height, 0, frame->height,
|
||||
ptr, linesize);
|
||||
decompress_420((const uint8_t* const*)frame->data,
|
||||
frame->linesize,
|
||||
0, frame->height, ptr, linesize);
|
||||
|
||||
else if (type == CONVERT_NV12)
|
||||
decompress_nv12(frame->data, frame->linesize,
|
||||
frame->width, frame->height, 0, frame->height,
|
||||
ptr, linesize);
|
||||
decompress_nv12((const uint8_t* const*)frame->data,
|
||||
frame->linesize,
|
||||
0, frame->height, ptr, linesize);
|
||||
|
||||
else if (type == CONVERT_422_Y)
|
||||
decompress_422(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, 0, frame->height,
|
||||
ptr, linesize, true);
|
||||
0, frame->height, ptr, linesize, true);
|
||||
|
||||
else if (type == CONVERT_422_U)
|
||||
decompress_422(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, 0, frame->height,
|
||||
ptr, linesize, false);
|
||||
0, frame->height, ptr, linesize, false);
|
||||
|
||||
texture_unmap(tex);
|
||||
return true;
|
||||
@ -799,8 +797,7 @@ static void copy_frame_data(struct source_frame *dst,
|
||||
}
|
||||
}
|
||||
|
||||
static inline struct source_frame *cache_video(obs_source_t source,
|
||||
const struct source_frame *frame)
|
||||
static inline struct source_frame *cache_video(const struct source_frame *frame)
|
||||
{
|
||||
/* TODO: use an actual cache */
|
||||
struct source_frame *new_frame = source_frame_create(frame->format,
|
||||
@ -813,7 +810,7 @@ static inline struct source_frame *cache_video(obs_source_t source,
|
||||
void obs_source_output_video(obs_source_t source,
|
||||
const struct source_frame *frame)
|
||||
{
|
||||
struct source_frame *output = cache_video(source, frame);
|
||||
struct source_frame *output = cache_video(frame);
|
||||
|
||||
pthread_mutex_lock(&source->filter_mutex);
|
||||
output = filter_async_video(source, output);
|
||||
@ -875,15 +872,15 @@ static inline void reset_resampler(obs_source_t source,
|
||||
}
|
||||
|
||||
static inline void copy_audio_data(obs_source_t source,
|
||||
const void *const data[], uint32_t frames, uint64_t timestamp)
|
||||
const uint8_t *const data[], uint32_t frames, uint64_t ts)
|
||||
{
|
||||
size_t planes = audio_output_planes(obs->audio.audio);
|
||||
size_t blocksize = audio_output_blocksize(obs->audio.audio);
|
||||
size_t size = (size_t)frames * blocksize;
|
||||
bool resize = source->audio_storage_size < size;
|
||||
|
||||
source->audio_data.frames = frames;
|
||||
source->audio_data.timestamp = timestamp;
|
||||
source->audio_data.frames = frames;
|
||||
source->audio_data.timestamp = ts;
|
||||
|
||||
for (size_t i = 0; i < planes; i++) {
|
||||
/* ensure audio storage capacity */
|
||||
@ -911,7 +908,7 @@ static void process_audio(obs_source_t source, const struct source_audio *audio)
|
||||
return;
|
||||
|
||||
if (source->resampler) {
|
||||
uint8_t *output[MAX_AUDIO_PLANES];
|
||||
uint8_t *output[MAX_AV_PLANES];
|
||||
uint32_t frames;
|
||||
uint64_t offset;
|
||||
|
||||
@ -921,7 +918,7 @@ static void process_audio(obs_source_t source, const struct source_audio *audio)
|
||||
output, &frames, &offset,
|
||||
audio->data, audio->frames);
|
||||
|
||||
copy_audio_data(source, output, frames,
|
||||
copy_audio_data(source, (const uint8_t *const *)output, frames,
|
||||
audio->timestamp - offset);
|
||||
} else {
|
||||
copy_audio_data(source, audio->data, audio->frames,
|
||||
@ -933,7 +930,6 @@ void obs_source_output_audio(obs_source_t source,
|
||||
const struct source_audio *audio)
|
||||
{
|
||||
uint32_t flags = obs_source_get_output_flags(source);
|
||||
size_t blocksize = audio_output_blocksize(obs->audio.audio);
|
||||
struct filtered_audio *output;
|
||||
|
||||
process_audio(source, audio);
|
||||
@ -951,7 +947,7 @@ void obs_source_output_audio(obs_source_t source,
|
||||
if (source->timing_set || async) {
|
||||
struct audio_data data;
|
||||
|
||||
for (int i = 0; i < MAX_AUDIO_PLANES; i++)
|
||||
for (int i = 0; i < MAX_AV_PLANES; i++)
|
||||
data.data[i] = output->data[i];
|
||||
|
||||
data.frames = output->frames;
|
||||
@ -1022,7 +1018,6 @@ static inline struct source_frame *get_closest_frame(obs_source_t source,
|
||||
struct source_frame *obs_source_getframe(obs_source_t source)
|
||||
{
|
||||
struct source_frame *frame = NULL;
|
||||
uint64_t last_frame_time = source->last_frame_ts;
|
||||
int audio_time_refs = 0;
|
||||
uint64_t sys_time;
|
||||
|
||||
@ -1087,11 +1082,10 @@ void obs_source_gettype(obs_source_t source, enum obs_source_type *type,
|
||||
}
|
||||
|
||||
static inline void render_filter_bypass(obs_source_t target, effect_t effect,
|
||||
uint32_t width, uint32_t height, bool use_matrix)
|
||||
bool use_matrix)
|
||||
{
|
||||
const char *tech_name = use_matrix ? "DrawMatrix" : "Draw";
|
||||
technique_t tech = effect_gettechnique(effect, tech_name);
|
||||
eparam_t image = effect_getparambyname(effect, "image");
|
||||
size_t passes, i;
|
||||
|
||||
passes = technique_begin(tech);
|
||||
@ -1141,7 +1135,7 @@ void obs_source_process_filter(obs_source_t filter, effect_t effect,
|
||||
* using the filter effect instead of rendering to texture to reduce
|
||||
* the total number of passes */
|
||||
if (can_directly && expects_def && target == parent) {
|
||||
render_filter_bypass(target, effect, width, height, use_matrix);
|
||||
render_filter_bypass(target, effect, use_matrix);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -136,8 +136,7 @@ struct obs_source_info {
* Gets the property information of this source
*
* @param locale The locale to translate with
* @return The properties data. Caller is responsible for
* freeing the data with obs_properties_destroy
* @return The properties data
*/
obs_properties_t (*get_properties)(const char *locale);

@ -79,7 +79,7 @@ static inline void unmap_last_surface(struct obs_core_video *video)
}

static inline void render_main_texture(struct obs_core_video *video,
int cur_texture, int prev_texture)
int cur_texture)
{
struct vec4 clear_color;
vec4_set(&clear_color, 0.3f, 0.0f, 0.0f, 1.0f);
@ -161,7 +161,7 @@ static inline void render_video(struct obs_core_video *video, int cur_texture,
gs_enable_depthtest(false);
gs_setcullmode(GS_NEITHER);

render_main_texture(video, cur_texture, prev_texture);
render_main_texture(video, cur_texture);
render_output_texture(video, cur_texture, prev_texture);
stage_output_texture(video, cur_texture, prev_texture);

@ -171,7 +171,7 @@ static inline void render_video(struct obs_core_video *video, int cur_texture,
}

/* TODO: replace with more optimal conversion */
static inline bool download_frame(struct obs_core_video *video, int cur_texture,
static inline bool download_frame(struct obs_core_video *video,
int prev_texture, struct video_frame *frame)
{
stagesurf_t surface = video->copy_surfaces[prev_texture];
@ -195,14 +195,12 @@ static bool convert_frame(struct obs_core_video *video,
if (info->format == VIDEO_FORMAT_I420) {
compress_uyvx_to_i420(
frame->data[0], frame->linesize[0],
info->width, info->height,
0, info->height,
new_frame->data, new_frame->linesize);

} else if (info->format == VIDEO_FORMAT_NV12) {
compress_uyvx_to_nv12(
frame->data[0], frame->linesize[0],
info->width, info->height,
0, info->height,
new_frame->data, new_frame->linesize);

@ -211,7 +209,7 @@ static bool convert_frame(struct obs_core_video *video,
return false;
}

for (size_t i = 0; i < MAX_VIDEO_PLANES; i++) {
for (size_t i = 0; i < MAX_AV_PLANES; i++) {
frame->data[i] = new_frame->data[i];
frame->linesize[i] = new_frame->linesize[i];
}
@ -246,7 +244,7 @@ static inline void output_frame(uint64_t timestamp)
gs_entercontext(obs_graphics());

render_video(video, cur_texture, prev_texture);
frame_ready = download_frame(video, cur_texture, prev_texture, &frame);
frame_ready = download_frame(video, prev_texture, &frame);

gs_leavecontext();

@ -272,5 +270,6 @@ void *obs_video_thread(void *param)

}

UNUSED_PARAMETER(param);
return NULL;
}

@ -187,7 +187,6 @@ static void obs_free_graphics(void)
size_t i;

if (video->graphics) {
int cur_texture = video->cur_texture;
gs_entercontext(video->graphics);

if (video->mapped_surface)
@ -246,7 +245,6 @@ static bool obs_init_data(void)
{
struct obs_core_data *data = &obs->data;
pthread_mutexattr_t attr;
bool success = false;

pthread_mutex_init_value(&obs->data.displays_mutex);

libobs/obs.h
@ -125,7 +125,7 @@ struct obs_video_info {
* audio data
*/
struct filtered_audio {
uint8_t *data[MAX_AUDIO_PLANES];
uint8_t *data[MAX_AV_PLANES];
uint32_t frames;
uint64_t timestamp;
};
@ -135,7 +135,7 @@ struct filtered_audio {
* source audio. Audio is automatically resampled and remixed as necessary.
*/
struct source_audio {
const uint8_t *data[MAX_AUDIO_PLANES];
const uint8_t *data[MAX_AV_PLANES];
uint32_t frames;

enum speaker_layout speakers;
@ -155,8 +155,8 @@ struct source_audio {
* converted to RGB via shader on the graphics processor.
*/
struct source_frame {
uint8_t *data[MAX_VIDEO_PLANES];
uint32_t linesize[MAX_VIDEO_PLANES];
uint8_t *data[MAX_AV_PLANES];
uint32_t linesize[MAX_AV_PLANES];
uint32_t width;
uint32_t height;
uint64_t timestamp;
@ -166,22 +166,6 @@ struct source_frame {
bool flip;
};

enum packet_priority {
PACKET_PRIORITY_DISPOSABLE,
PACKET_PRIORITY_LOW,
PACKET_PRIORITY_PFRAME,
PACKET_PRIORITY_IFRAME,
PACKET_PRIORITY_OTHER /* audio usually */
};

struct encoder_packet {
int64_t dts;
int64_t pts;
void *data;
size_t size;
enum packet_priority priority;
};

/* ------------------------------------------------------------------------- */
/* OBS context */

@ -634,30 +618,28 @@ EXPORT obs_encoder_t obs_encoder_create(const char *id, const char *name,
obs_data_t settings);
EXPORT void obs_encoder_destroy(obs_encoder_t encoder);

EXPORT bool obs_encoder_reset(obs_encoder_t encoder, obs_data_t settings);

EXPORT bool obs_encoder_encode(obs_encoder_t encoder,
const struct encoder_frame *frame,
struct encoder_packet *packet,
bool *received_packet);

EXPORT bool obs_encoder_start(obs_encoder_t encoder,
void (*new_packet)(void *param, struct encoder_packet *packet),
void *param);
EXPORT void obs_encoder_stop(obs_encoder_t encoder,
void (*new_packet)(void *param, struct encoder_packet *packet),
void *param);

/** Returns the property list, if any. Free with obs_properties_destroy */
EXPORT obs_properties_t obs_output_properties(const char *id,
const char *locale);

EXPORT void obs_encoder_update(obs_encoder_t encoder, obs_data_t settings);

EXPORT bool obs_encoder_reset(obs_encoder_t encoder);

EXPORT bool obs_encoder_encode(obs_encoder_t encoder, void *frames,
size_t size);
EXPORT int obs_encoder_getheader(obs_encoder_t encoder,
struct encoder_packet **packets);

EXPORT bool obs_encoder_start(obs_encoder_t encoder,
void (*new_packet)(void *param, struct encoder_packet *packet),
void *param);
EXPORT bool obs_encoder_stop(obs_encoder_t encoder,
void (*new_packet)(void *param, struct encoder_packet *packet),
void *param);

EXPORT bool obs_encoder_setbitrate(obs_encoder_t encoder, uint32_t bitrate,
uint32_t buffersize);

EXPORT bool obs_encoder_request_keyframe(obs_encoder_t encoder);
EXPORT bool obs_encoder_get_extra_data(obs_encoder_t encoder,
uint8_t **extra_data, size_t *size);

EXPORT obs_data_t obs_encoder_get_settings(obs_encoder_t encoder);

@ -21,6 +21,8 @@
* bool, inline, stdint
*/

#define UNUSED_PARAMETER(param) (void)param

#ifdef _MSC_VER
#define FORCE_INLINE __forceinline
#else

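The UNUSED_PARAMETER macro added above is what most of the later hunks lean on: casting an argument to void marks it as intentionally unused, so -Wall -Wextra stops reporting -Wunused-parameter for callbacks whose signatures are fixed by the API. A minimal sketch of the pattern as it is used throughout this commit (the function name is illustrative):

    #define UNUSED_PARAMETER(param) (void)param

    /* The signature is dictated by the callback table, but 'locale' is not needed here. */
    static const char *example_getname(const char *locale)
    {
        UNUSED_PARAMETER(locale);
        return "Example Source";
    }
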
@ -49,6 +49,8 @@ static void do_log(enum log_type type, const char *msg, va_list args)
vprintf(msg, args);
printf("\n");
#endif

UNUSED_PARAMETER(type);
}

bool OBSApp::InitGlobalConfigDefaults()

@ -36,8 +36,8 @@ Q_DECLARE_METATYPE(OBSSceneItem);

OBSBasic::OBSBasic(QWidget *parent)
: OBSMainWindow (parent),
ui (new Ui::OBSBasic),
outputTest (NULL)
outputTest (NULL),
ui (new Ui::OBSBasic)
{
ui->setupUi(this);
}
@ -102,6 +102,8 @@ void OBSBasic::UpdateSources(OBSScene scene)
{
OBSBasic *window = static_cast<OBSBasic*>(p);
window->AddSceneItem(item);

UNUSED_PARAMETER(scene);
return true;
}, this);
}
@ -209,7 +211,6 @@ void OBSBasic::SceneItemAdded(void *data, calldata_t params)
{
OBSBasic *window = static_cast<OBSBasic*>(data);

obs_scene_t scene = (obs_scene_t)calldata_ptr(params, "scene");
obs_sceneitem_t item = (obs_sceneitem_t)calldata_ptr(params, "item");

QMetaObject::invokeMethod(window, "AddSceneItem",
@ -220,7 +221,6 @@ void OBSBasic::SceneItemRemoved(void *data, calldata_t params)
{
OBSBasic *window = static_cast<OBSBasic*>(data);

obs_scene_t scene = (obs_scene_t)calldata_ptr(params, "scene");
obs_sceneitem_t item = (obs_sceneitem_t)calldata_ptr(params, "item");

QMetaObject::invokeMethod(window, "RemoveSceneItem",
@ -267,6 +267,10 @@ void OBSBasic::ChannelChanged(void *data, calldata_t params)
void OBSBasic::RenderMain(void *data, uint32_t cx, uint32_t cy)
{
obs_render_main_view();

UNUSED_PARAMETER(data);
UNUSED_PARAMETER(cx);
UNUSED_PARAMETER(cy);
}

/* Main class functions */
@ -344,10 +348,14 @@ void OBSBasic::ResizePreview(uint32_t cx, uint32_t cy)

void OBSBasic::closeEvent(QCloseEvent *event)
{
/* TODO */
UNUSED_PARAMETER(event);
}

void OBSBasic::changeEvent(QEvent *event)
{
/* TODO */
UNUSED_PARAMETER(event);
}

void OBSBasic::resizeEvent(QResizeEvent *event)
@ -356,18 +364,23 @@ void OBSBasic::resizeEvent(QResizeEvent *event)

if (obs_get_video_info(&ovi))
ResizePreview(ovi.base_width, ovi.base_height);

UNUSED_PARAMETER(event);
}

void OBSBasic::on_action_New_triggered()
{
/* TODO */
}

void OBSBasic::on_action_Open_triggered()
{
/* TODO */
}

void OBSBasic::on_action_Save_triggered()
{
/* TODO */
}

void OBSBasic::on_scenes_itemChanged(QListWidgetItem *item)
@ -388,6 +401,8 @@ void OBSBasic::on_scenes_itemChanged(QListWidgetItem *item)

void OBSBasic::on_scenes_customContextMenuRequested(const QPoint &pos)
{
/* TODO */
UNUSED_PARAMETER(pos);
}

void OBSBasic::on_actionAddScene_triggered()
@ -433,22 +448,29 @@ void OBSBasic::on_actionRemoveScene_triggered()

void OBSBasic::on_actionSceneProperties_triggered()
{
/* TODO */
}

void OBSBasic::on_actionSceneUp_triggered()
{
/* TODO */
}

void OBSBasic::on_actionSceneDown_triggered()
{
/* TODO */
}

void OBSBasic::on_sources_itemChanged(QListWidgetItem *item)
{
/* TODO */
UNUSED_PARAMETER(item);
}

void OBSBasic::on_sources_customContextMenuRequested(const QPoint &pos)
{
/* TODO */
UNUSED_PARAMETER(pos);
}

void OBSBasic::AddSource(obs_scene_t scene, const char *id)
@ -483,7 +505,7 @@ void OBSBasic::AddSource(obs_scene_t scene, const char *id)
sourceSceneRefs[source] = 0;

obs_add_source(source);
obs_sceneitem_t item = obs_scene_add(scene, source);
obs_scene_add(scene, source);
obs_source_release(source);
}
}

@ -413,6 +413,8 @@ void OBSBasicSettings::on_language_currentIndexChanged(int index)
{
if (!loading)
generalChanged = true;

UNUSED_PARAMETER(index);
}

void OBSBasicSettings::on_renderer_currentIndexChanged(int index)
@ -422,22 +424,30 @@ void OBSBasicSettings::on_renderer_currentIndexChanged(int index)
ui->errorText->setText(
QTStr("Settings.ProgramRestart"));
}

UNUSED_PARAMETER(index);
}

void OBSBasicSettings::on_fpsType_currentIndexChanged(int index)
{
if (!loading)
videoChanged = true;

UNUSED_PARAMETER(index);
}

void OBSBasicSettings::on_baseResolution_editTextChanged(const QString &text)
{
if (!loading && ValidResolutions(ui.get()))
videoChanged = true;

UNUSED_PARAMETER(text);
}

void OBSBasicSettings::on_outputResolution_editTextChanged(const QString &text)
{
if (!loading && ValidResolutions(ui.get()))
videoChanged = true;

UNUSED_PARAMETER(text);
}

@ -34,8 +34,8 @@ struct ffmpeg_data {
int frame_size;
int total_frames;

struct circlebuf excess_frames[MAX_AUDIO_PLANES];
uint8_t *samples[MAX_AUDIO_PLANES];
struct circlebuf excess_frames[MAX_AV_PLANES];
uint8_t *samples[MAX_AV_PLANES];
AVFrame *aframe;
int total_samples;

@ -97,8 +97,7 @@ static bool new_stream(struct ffmpeg_data *data, AVStream **stream,
return true;
}

static bool open_video_codec(struct ffmpeg_data *data,
struct obs_video_info *ovi)
static bool open_video_codec(struct ffmpeg_data *data)
{
AVCodecContext *context = data->video->codec;
int ret;
@ -174,7 +173,7 @@ static bool create_video_stream(struct ffmpeg_data *data)
if (data->output->oformat->flags & AVFMT_GLOBALHEADER)
context->flags |= CODEC_FLAG_GLOBAL_HEADER;

if (!open_video_codec(data, &ovi))
if (!open_video_codec(data))
return false;

if (context->pix_fmt != AV_PIX_FMT_YUV420P)
@ -184,8 +183,7 @@ static bool create_video_stream(struct ffmpeg_data *data)
return true;
}

static bool open_audio_codec(struct ffmpeg_data *data,
struct audio_output_info *aoi)
static bool open_audio_codec(struct ffmpeg_data *data)
{
AVCodecContext *context = data->audio->codec;
int ret;
@ -242,7 +240,7 @@ static bool create_audio_stream(struct ffmpeg_data *data)
if (data->output->oformat->flags & AVFMT_GLOBALHEADER)
context->flags |= CODEC_FLAG_GLOBAL_HEADER;

return open_audio_codec(data, &aoi);
return open_audio_codec(data);
}

static inline bool init_streams(struct ffmpeg_data *data)
@ -294,7 +292,7 @@ static void close_video(struct ffmpeg_data *data)

static void close_audio(struct ffmpeg_data *data)
{
for (size_t i = 0; i < MAX_AUDIO_PLANES; i++)
for (size_t i = 0; i < MAX_AV_PLANES; i++)
circlebuf_free(&data->excess_frames[i]);

av_freep(&data->samples[0]);
@ -355,16 +353,20 @@ fail:

static const char *ffmpeg_output_getname(const char *locale)
{
UNUSED_PARAMETER(locale);
return "FFmpeg file output";
}

static void ffmpeg_log_callback(void *param, int bla, const char *format,
va_list args)
{
blogva(LOG_INFO, format, args);
blogva(LOG_DEBUG, format, args);

UNUSED_PARAMETER(param);
UNUSED_PARAMETER(bla);
}

static struct ffmpeg_output *ffmpeg_output_create(obs_data_t settings,
static void *ffmpeg_output_create(obs_data_t settings,
obs_output_t output)
{
struct ffmpeg_output *data = bzalloc(sizeof(struct ffmpeg_output));
@ -372,23 +374,21 @@ static struct ffmpeg_output *ffmpeg_output_create(obs_data_t settings,

av_log_set_callback(ffmpeg_log_callback);

UNUSED_PARAMETER(settings);
return data;
}

static void ffmpeg_output_destroy(struct ffmpeg_output *data)
static void ffmpeg_output_destroy(void *data)
{
if (data) {
if (data->active)
ffmpeg_data_free(&data->ff_data);
struct ffmpeg_output *output = data;

if (output) {
if (output->active)
ffmpeg_data_free(&output->ff_data);
bfree(data);
}
}

static void ffmpeg_output_update(struct ffmpeg_output *data,
obs_data_t settings)
{
}

static inline int64_t rescale_ts(int64_t val, AVCodecContext *context,
AVStream *stream)
{
@ -431,7 +431,8 @@ static void receive_video(void *param, const struct video_frame *frame)
av_init_packet(&packet);

if (context->pix_fmt != AV_PIX_FMT_YUV420P)
sws_scale(data->swscale, frame->data, frame->linesize,
sws_scale(data->swscale, frame->data,
(const int*)frame->linesize,
0, context->height, data->dst_picture.data,
data->dst_picture.linesize);
else
@ -464,7 +465,8 @@ static void receive_video(void *param, const struct video_frame *frame)
context->time_base,
data->video->time_base);

ret = av_interleaved_write_frame(data->output, &packet);
ret = av_interleaved_write_frame(data->output,
&packet);
} else {
ret = 0;
}
@ -478,20 +480,20 @@ static void receive_video(void *param, const struct video_frame *frame)
data->total_frames++;
}

static inline void encode_audio(struct ffmpeg_data *data,
static inline void encode_audio(struct ffmpeg_data *output,
struct AVCodecContext *context, size_t block_size)
{
AVPacket packet = {0};
int ret, got_packet;
size_t total_size = data->frame_size * block_size * context->channels;
size_t total_size = output->frame_size * block_size * context->channels;

data->aframe->nb_samples = data->frame_size;
data->aframe->pts = av_rescale_q(data->total_samples,
output->aframe->nb_samples = output->frame_size;
output->aframe->pts = av_rescale_q(output->total_samples,
(AVRational){1, context->sample_rate},
context->time_base);

ret = avcodec_fill_audio_frame(data->aframe, context->channels,
context->sample_fmt, data->samples[0],
ret = avcodec_fill_audio_frame(output->aframe, context->channels,
context->sample_fmt, output->samples[0],
(int)total_size, 1);
if (ret < 0) {
blog(LOG_ERROR, "receive_audio: avcodec_fill_audio_frame "
@ -499,9 +501,9 @@ static inline void encode_audio(struct ffmpeg_data *data,
return;
}

data->total_samples += data->frame_size;
output->total_samples += output->frame_size;

ret = avcodec_encode_audio2(context, &packet, data->aframe,
ret = avcodec_encode_audio2(context, &packet, output->aframe,
&got_packet);
if (ret < 0) {
blog(LOG_ERROR, "receive_audio: Error encoding audio: %s",
@ -512,13 +514,13 @@ static inline void encode_audio(struct ffmpeg_data *data,
if (!got_packet)
return;

packet.pts = rescale_ts(packet.pts, context, data->audio);
packet.dts = rescale_ts(packet.dts, context, data->audio);
packet.pts = rescale_ts(packet.pts, context, output->audio);
packet.dts = rescale_ts(packet.dts, context, output->audio);
packet.duration = (int)av_rescale_q(packet.duration, context->time_base,
data->audio->time_base);
packet.stream_index = data->audio->index;
output->audio->time_base);
packet.stream_index = output->audio->index;

ret = av_interleaved_write_frame(data->output, &packet);
ret = av_interleaved_write_frame(output->output, &packet);
if (ret != 0)
blog(LOG_ERROR, "receive_audio: Error writing audio: %s",
av_err2str(ret));
@ -548,8 +550,10 @@ static void receive_audio(void *param, const struct audio_data *frame)
}
}

static bool ffmpeg_output_start(struct ffmpeg_output *data)
static bool ffmpeg_output_start(void *data)
{
struct ffmpeg_output *output = data;

video_t video = obs_video();
audio_t audio = obs_audio();

@ -560,14 +564,14 @@ static bool ffmpeg_output_start(struct ffmpeg_output *data)
}

const char *filename_test;
obs_data_t settings = obs_output_get_settings(data->output);
obs_data_t settings = obs_output_get_settings(output->output);
filename_test = obs_data_getstring(settings, "filename");
obs_data_release(settings);

if (!filename_test || !*filename_test)
return false;

if (!ffmpeg_data_init(&data->ff_data, filename_test))
if (!ffmpeg_data_init(&output->ff_data, filename_test))
return false;

struct audio_convert_info aci;
@ -580,26 +584,29 @@ static bool ffmpeg_output_start(struct ffmpeg_output *data)
vci.width = 0;
vci.height = 0;

video_output_connect(video, &vci, receive_video, data);
audio_output_connect(audio, &aci, receive_audio, data);
data->active = true;
video_output_connect(video, &vci, receive_video, output);
audio_output_connect(audio, &aci, receive_audio, output);
output->active = true;

return true;
}

static void ffmpeg_output_stop(struct ffmpeg_output *data)
static void ffmpeg_output_stop(void *data)
{
if (data->active) {
data->active = false;
struct ffmpeg_output *output = data;

if (output->active) {
output->active = false;
video_output_disconnect(obs_video(), receive_video, data);
audio_output_disconnect(obs_audio(), receive_audio, data);
ffmpeg_data_free(&data->ff_data);
ffmpeg_data_free(&output->ff_data);
}
}

static bool ffmpeg_output_active(struct ffmpeg_output *data)
static bool ffmpeg_output_active(void *data)
{
return data->active;
struct ffmpeg_output *output = data;
return output->active;
}

struct obs_output_info ffmpeg_output = {
@ -607,7 +614,6 @@ struct obs_output_info ffmpeg_output = {
.getname = ffmpeg_output_getname,
.create = ffmpeg_output_create,
.destroy = ffmpeg_output_destroy,
.update = ffmpeg_output_update,
.start = ffmpeg_output_start,
.stop = ffmpeg_output_stop,
.active = ffmpeg_output_active

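A note on the recurring signature change above: the obs_output_info (and obs_source_info) callback tables appear to expect functions taking void *, so implementations that took a concrete struct pointer made assignments like .destroy and .start trigger incompatible-pointer-type warnings. The fix used throughout this commit is to accept void * and cast inside the function. A condensed sketch of that pattern (struct and function names are illustrative, not from the source tree):

    struct my_output {
        bool active;
    };

    /* Matches a void (*destroy)(void *data) callback slot exactly, so no warning. */
    static void my_output_destroy(void *data)
    {
        struct my_output *output = data; /* void * converts implicitly in C */

        if (output) {
            /* ...free resources... */
        }
    }
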
@ -7,5 +7,7 @@ extern struct obs_output_info ffmpeg_output;
bool obs_module_load(uint32_t obs_version)
{
obs_register_output(&ffmpeg_output);

UNUSED_PARAMETER(obs_version);
return true;
}

@ -22,22 +22,21 @@ struct desktop_tex {
IOSurfaceRef current, prev;
};

static IOSurfaceRef current = NULL,
prev = NULL;
static pthread_mutex_t c_mutex;

static const char *osx_desktop_test_getname(const char *locale)
{
UNUSED_PARAMETER(locale);
return "OSX Monitor Capture";
}

static void osx_desktop_test_destroy(struct desktop_tex *rt)
static void osx_desktop_test_destroy(void *data)
{
struct desktop_tex *rt = data;

if (rt) {
pthread_mutex_lock(&rt->mutex);
gs_entercontext(obs_graphics());

if (current) {
if (rt->current) {
IOSurfaceDecrementUseCount(rt->current);
CFRelease(rt->current);
}
@ -54,8 +53,7 @@ static void osx_desktop_test_destroy(struct desktop_tex *rt)
}
}

static struct desktop_tex *osx_desktop_test_create(const char *settings,
obs_source_t source)
static void *osx_desktop_test_create(obs_data_t settings, obs_source_t source)
{
struct desktop_tex *rt = bzalloc(sizeof(struct desktop_tex));
char *effect_file;
@ -123,6 +121,10 @@ static struct desktop_tex *osx_desktop_test_create(const char *settings,
CFRetain(rt->current);
IOSurfaceIncrementUseCount(rt->current);
pthread_mutex_unlock(&rt->mutex);

UNUSED_PARAMETER(status);
UNUSED_PARAMETER(displayTime);
UNUSED_PARAMETER(updateRef);
}
);

@ -133,12 +135,15 @@ static struct desktop_tex *osx_desktop_test_create(const char *settings,
return NULL;
}

UNUSED_PARAMETER(source);
UNUSED_PARAMETER(settings);
return rt;
}

static void osx_desktop_test_video_render(struct desktop_tex *rt,
obs_source_t filter_target)
static void osx_desktop_test_video_render(void *data, effect_t effect)
{
struct desktop_tex *rt = data;

pthread_mutex_lock(&rt->mutex);

if (rt->prev != rt->current) {
@ -166,15 +171,19 @@ static void osx_desktop_test_video_render(struct desktop_tex *rt,

fail:
pthread_mutex_unlock(&rt->mutex);

UNUSED_PARAMETER(effect);
}

static uint32_t osx_desktop_test_getwidth(struct desktop_tex *rt)
static uint32_t osx_desktop_test_getwidth(void *data)
{
struct desktop_tex *rt = data;
return rt->width;
}

static uint32_t osx_desktop_test_getheight(struct desktop_tex *rt)
static uint32_t osx_desktop_test_getheight(void *data)
{
struct desktop_tex *rt = data;
return rt->height;
}

@ -7,11 +7,14 @@ struct test_filter {

static const char *filter_getname(const char *locale)
{
UNUSED_PARAMETER(locale);
return "Test";
}

static void filter_destroy(struct test_filter *tf)
static void filter_destroy(void *data)
{
struct test_filter *tf = data;

if (tf) {
gs_entercontext(obs_graphics());

@ -22,8 +25,7 @@ static void filter_destroy(struct test_filter *tf)
}
}

static struct test_filter *filter_create(obs_data_t settings,
obs_source_t source)
static void *filter_create(obs_data_t settings, obs_source_t source)
{
struct test_filter *tf = bzalloc(sizeof(struct test_filter));
char *effect_file;
@ -42,13 +44,17 @@ static struct test_filter *filter_create(obs_data_t settings,

gs_leavecontext();

UNUSED_PARAMETER(settings);
return tf;
}

static void filter_render(struct test_filter *tf, effect_t effect)
static void filter_render(void *data, effect_t effect)
{
struct test_filter *tf = data;
obs_source_process_filter(tf->source, tf->whatever, 0, 0, GS_RGBA,
ALLOW_DIRECT_RENDERING);

UNUSED_PARAMETER(effect);
}

struct obs_source_info test_filter = {

@ -20,5 +20,6 @@ bool obs_module_load(uint32_t libobs_version)
obs_register_source(&osx_desktop);
#endif

UNUSED_PARAMETER(libobs_version);
return true;
}

@ -7,11 +7,14 @@ struct random_tex {

static const char *random_getname(const char *locale)
{
UNUSED_PARAMETER(locale);
return "20x20 Random Pixel Texture Source (Test)";
}

static void random_destroy(struct random_tex *rt)
static void random_destroy(void *data)
{
struct random_tex *rt = data;

if (rt) {
gs_entercontext(obs_graphics());

@ -22,8 +25,7 @@ static void random_destroy(struct random_tex *rt)
}
}

static struct random_tex *random_create(obs_data_t settings,
obs_source_t source)
static void *random_create(obs_data_t settings, obs_source_t source)
{
struct random_tex *rt = bzalloc(sizeof(struct random_tex));
uint32_t *pixels = bmalloc(20*20*4);
@ -53,23 +55,28 @@ static struct random_tex *random_create(obs_data_t settings,

gs_leavecontext();

UNUSED_PARAMETER(settings);
UNUSED_PARAMETER(source);
return rt;
}

static void random_video_render(struct random_tex *rt, effect_t effect)
static void random_video_render(void *data, effect_t effect)
{
struct random_tex *rt = data;
eparam_t image = effect_getparambyname(effect, "image");
effect_settexture(effect, image, rt->texture);
gs_draw_sprite(rt->texture, 0, 0, 0);
}

static uint32_t random_getwidth(struct random_tex *rt)
static uint32_t random_getwidth(void *data)
{
struct random_tex *rt = data;
return texture_getwidth(rt->texture);
}

static uint32_t random_getheight(struct random_tex *rt)
static uint32_t random_getheight(void *data)
{
struct random_tex *rt = data;
return texture_getheight(rt->texture);
}

@ -14,7 +14,10 @@ struct sinewave_data {
/* middle C */
static const double rate = 261.63/48000.0;

#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif

#define M_PI_X2 M_PI*2

static void *sinewave_thread(void *pdata)
@ -57,11 +60,14 @@ static void *sinewave_thread(void *pdata)

static const char *sinewave_getname(const char *locale)
{
UNUSED_PARAMETER(locale);
return "Sinewave Sound Source (Test)";
}

static void sinewave_destroy(struct sinewave_data *swd)
static void sinewave_destroy(void *data)
{
struct sinewave_data *swd = data;

if (swd) {
if (swd->initialized_thread) {
void *ret;
@ -74,7 +80,7 @@ static void sinewave_destroy(struct sinewave_data *swd)
}
}

static struct sinewave_data *sinewave_create(obs_data_t settings,
static void *sinewave_create(obs_data_t settings,
obs_source_t source)
{
struct sinewave_data *swd = bzalloc(sizeof(struct sinewave_data));
@ -86,6 +92,8 @@ static struct sinewave_data *sinewave_create(obs_data_t settings,
goto fail;

swd->initialized_thread = true;

UNUSED_PARAMETER(settings);
return swd;

fail:

@ -196,7 +196,6 @@
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalIncludeDirectories>../../../libobs/util/vc</AdditionalIncludeDirectories>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<InlineFunctionExpansion>OnlyExplicitInline</InlineFunctionExpansion>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
@ -218,7 +217,6 @@
<ExceptionHandling>false</ExceptionHandling>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalIncludeDirectories>../../../libobs/util/vc</AdditionalIncludeDirectories>
<InlineFunctionExpansion>OnlyExplicitInline</InlineFunctionExpansion>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>