Implement output, improve video/audio subsystems

 - Fill in the rest of the FFmpeg test output code so that it actually
   outputs data properly (the audio buffering it now performs is
   sketched after this list).

 - Improve the main video subsystem to be a bit more efficient, and
   automatically convert the output to I420 or NV12 when needed (a
   conversion sketch follows this list).

 - Fix audio subsystem insertion and byte calculation.  New audio data
   is now seamlessly inserted into the audio stream based on its
   timestamp value.  (Be extremely cautious when using floating-point
   calculations for important things like this: always round the
   results and sanity-check them.  The timestamp math is sketched
   after this list.)

 - Use 32-byte alignment in case of future optimizations, and export a
   function to query the current alignment (usage sketched below).

 - Make os_sleepto_ns return true if it slept, and false if the target
   time had already passed before the call (the caller pattern is
   sketched below).

 - Fix the sinewave output so that it actually generates a proper
   middle-C sine wave (phase accumulation sketched below).

 - Change row_bytes to linesize (this also makes the naming more
   consistent with FFmpeg).
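
A rough sketch of the audio buffering the FFmpeg test output now performs (condensed from the receive_audio/encode_audio changes in this commit; the struct and function names here are illustrative, and encode_audio is a stub standing in for the real avcodec encode call):

#include <stddef.h>
#include <stdint.h>
#include <util/circlebuf.h>
#include <media-io/audio-io.h>

struct audio_buffering {
	struct circlebuf excess_frames[MAX_AUDIO_PLANES];
	uint8_t *samples[MAX_AUDIO_PLANES]; /* frame_size * block_size each */
	size_t frame_size;  /* samples per encoder frame */
	size_t block_size;  /* bytes per sample block (per plane) */
	size_t planes;
};

static void encode_audio(struct audio_buffering *b)
{
	/* avcodec_fill_audio_frame() + avcodec_encode_audio2() go here */
	(void)b;
}

static void buffer_and_encode(struct audio_buffering *b,
		const uint8_t *const data[], size_t frames)
{
	size_t frame_size_bytes = b->frame_size * b->block_size;

	/* queue whatever amount of audio arrived, per plane */
	for (size_t i = 0; i < b->planes; i++)
		circlebuf_push_back(&b->excess_frames[i], data[i],
				frames * b->block_size);

	/* encode only full encoder-sized frames; the remainder stays queued */
	while (b->excess_frames[0].size >= frame_size_bytes) {
		for (size_t i = 0; i < b->planes; i++)
			circlebuf_pop_front(&b->excess_frames[i],
					b->samples[i], frame_size_bytes);
		encode_audio(b);
	}
}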
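
A minimal sketch of the new CPU conversion path in the video subsystem (simplified from convert_frame/output_video_data in obs-video.c; the wrapper name and parameters are illustrative, while the compress_uyvx_to_* functions are the ones declared in media-io/format-conversion.h):

#include <stdbool.h>
#include <stdint.h>
#include <media-io/video-io.h>
#include <media-io/format-conversion.h>

/* The rendered output texture is packed UYVX; if the configured output
 * format is I420 or NV12 the frame is converted on the CPU into the
 * pre-allocated planes in 'out' before being sent to the video output. */
static bool convert_uyvx_output(const uint8_t *uyvx, uint32_t uyvx_linesize,
		uint32_t width, uint32_t height, enum video_format format,
		uint8_t *out[], const uint32_t out_linesize[])
{
	if (format == VIDEO_FORMAT_I420) {
		compress_uyvx_to_i420(uyvx, uyvx_linesize, width, height,
				0, height, out, out_linesize);
		return true;
	} else if (format == VIDEO_FORMAT_NV12) {
		compress_uyvx_to_nv12(uyvx, uyvx_linesize, width, height,
				0, height, out, out_linesize);
		return true;
	}

	return false; /* RGB output formats need no conversion */
}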
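
A condensed version of the timestamp math now used to insert audio by timestamp (the real functions in audio-io.c take an audio_t object; here the sample rate and block size are passed directly to keep the sketch self-contained):

#include <math.h>
#include <stddef.h>
#include <stdint.h>

/* nanoseconds -> fractional frame count (kept as a double on purpose) */
static inline double ts_to_frames(uint32_t samples_per_sec, uint64_t ts)
{
	return (double)ts / 1000000000.0 * (double)samples_per_sec;
}

/* round-half-up for non-negative values; truncating here instead would
 * make offsets drift by a frame and cause audible glitches */
static inline double positive_round(double val)
{
	return floor(val + 0.5);
}

/* byte offset between two timestamps, rounded to whole sample blocks */
static size_t ts_diff_bytes(uint32_t samples_per_sec, size_t block_size,
		uint64_t ts1, uint64_t ts2)
{
	double diff = ts_to_frames(samples_per_sec, ts1) -
	              ts_to_frames(samples_per_sec, ts2);

	return (size_t)positive_round(diff) * block_size;
}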
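
The alignment change in practice: a condensed version of how source_frame_init() sizes and offsets planes, using the newly exported base_get_alignment() (declared alongside bmalloc) rather than a hard-coded constant. The helper function name is illustrative:

#include <stddef.h>
#include <stdint.h>
#include <util/bmem.h>

#define ALIGN_SIZE(size, align) \
	size = (((size)+(align-1)) & (~(align-1)))

/* offset of the first chroma plane in an I420 frame: the luma plane is
 * padded up to the allocator alignment (currently 32 bytes) so later
 * SSE/AVX code can assume aligned plane starts */
static size_t i420_chroma_offset(uint32_t width, uint32_t height)
{
	size_t size = (size_t)width * height;
	ALIGN_SIZE(size, base_get_alignment());
	return size;
}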
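
The intended caller pattern for the new os_sleepto_ns() return value (roughly what the test sinewave thread does now): when the target time has already passed, re-anchor the clock instead of trying to catch up in a tight loop.

#include <stdbool.h>
#include <stdint.h>
#include <util/platform.h>

static void timed_loop_10ms(volatile bool *stop)
{
	uint64_t last_time = os_gettime_ns();

	while (!*stop) {
		/* false means the deadline already passed, so reset it */
		if (!os_sleepto_ns(last_time += 10000000))
			last_time = os_gettime_ns();

		/* ... produce the next 10 ms worth of data ... */
	}
}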
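
A phase-accumulator sketch of the corrected sinewave: middle C is 261.63 Hz, so at a 48 kHz sample rate each sample advances the phase by 261.63/48000 of a full 2*pi period, and the [-1, 1] wave is mapped onto the full unsigned 8-bit range instead of being truncated (the function name is illustrative):

#include <math.h>
#include <stddef.h>
#include <stdint.h>

#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif

static const double rate = 261.63 / 48000.0;

static void fill_sinewave_u8(uint8_t *bytes, size_t count, double *phase)
{
	for (size_t i = 0; i < count; i++) {
		*phase += rate * M_PI * 2.0;
		if (*phase > M_PI * 2.0)
			*phase -= M_PI * 2.0;

		/* map [-1, 1] to [0, 255] */
		bytes[i] = (uint8_t)((cos(*phase) + 1.0) * 0.5 * 255.0);
	}
}
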
Branch: master
Author: jp9000
Date:   2014-02-09 05:51:06 -07:00
Parent: 4461281a3b
Commit: 6c92cf5841

30 changed files with 500 additions and 312 deletions

View File

@ -133,7 +133,7 @@ EXPORT void texture_destroy(texture_t tex);
EXPORT uint32_t texture_getwidth(texture_t tex);
EXPORT uint32_t texture_getheight(texture_t tex);
EXPORT enum gs_color_format texture_getcolorformat(texture_t tex);
EXPORT bool texture_map(texture_t tex, void **ptr, uint32_t *row_bytes);
EXPORT bool texture_map(texture_t tex, void **ptr, uint32_t *linesize);
EXPORT void texture_unmap(texture_t tex);
EXPORT void cubetexture_destroy(texture_t cubetex);
@ -151,7 +151,7 @@ EXPORT uint32_t stagesurface_getwidth(stagesurf_t stagesurf);
EXPORT uint32_t stagesurface_getheight(stagesurf_t stagesurf);
EXPORT enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf);
EXPORT bool stagesurface_map(stagesurf_t stagesurf, const void **data,
uint32_t *row_bytes);
uint32_t *linesize);
EXPORT void stagesurface_unmap(stagesurf_t stagesurf);
EXPORT void zstencil_destroy(zstencil_t zstencil);

View File

@ -1443,7 +1443,7 @@ enum gs_color_format texture_getcolorformat(texture_t tex)
return static_cast<gs_texture_2d*>(tex)->format;
}
bool texture_map(texture_t tex, void **ptr, uint32_t *row_bytes)
bool texture_map(texture_t tex, void **ptr, uint32_t *linesize)
{
HRESULT hr;
@ -1459,7 +1459,7 @@ bool texture_map(texture_t tex, void **ptr, uint32_t *row_bytes)
return false;
*ptr = map.pData;
*row_bytes = map.RowPitch;
*linesize = map.RowPitch;
return true;
}
@ -1548,7 +1548,7 @@ enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf)
}
bool stagesurface_map(stagesurf_t stagesurf, const void **data,
uint32_t *row_bytes)
uint32_t *linesize)
{
D3D11_MAPPED_SUBRESOURCE map;
if (FAILED(stagesurf->device->context->Map(stagesurf->texture, 0,
@ -1556,7 +1556,7 @@ bool stagesurface_map(stagesurf_t stagesurf, const void **data,
return false;
*data = map.pData;
*row_bytes = map.RowPitch;
*linesize = map.RowPitch;
return true;
}

View File

@ -127,7 +127,7 @@ EXPORT void texture_destroy(texture_t tex);
EXPORT uint32_t texture_getwidth(texture_t tex);
EXPORT uint32_t texture_getheight(texture_t tex);
EXPORT enum gs_color_format texture_getcolorformat(texture_t tex);
EXPORT bool texture_map(texture_t tex, void **ptr, uint32_t *row_bytes);
EXPORT bool texture_map(texture_t tex, void **ptr, uint32_t *linesize);
EXPORT void texture_unmap(texture_t tex);
EXPORT bool texture_isrect(texture_t tex);
@ -146,7 +146,7 @@ EXPORT uint32_t stagesurface_getwidth(stagesurf_t stagesurf);
EXPORT uint32_t stagesurface_getheight(stagesurf_t stagesurf);
EXPORT enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf);
EXPORT bool stagesurface_map(stagesurf_t stagesurf, const void **data,
uint32_t *row_bytes);
uint32_t *linesize);
EXPORT void stagesurface_unmap(stagesurf_t stagesurf);
EXPORT void zstencil_destroy(zstencil_t zstencil);

View File

@ -179,7 +179,7 @@ enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf)
}
bool stagesurface_map(stagesurf_t stagesurf, const void **data,
uint32_t *row_bytes)
uint32_t *linesize)
{
if (!gl_bind_buffer(GL_PIXEL_PACK_BUFFER, stagesurf->pack_buffer))
goto fail;
@ -190,7 +190,7 @@ bool stagesurface_map(stagesurf_t stagesurf, const void **data,
gl_bind_buffer(GL_PIXEL_PACK_BUFFER, 0);
*row_bytes = stagesurf->bytes_per_pixel * stagesurf->width;
*linesize = stagesurf->bytes_per_pixel * stagesurf->width;
return true;
fail:

View File

@ -162,7 +162,7 @@ enum gs_color_format texture_getcolorformat(texture_t tex)
return tex->format;
}
bool texture_map(texture_t tex, void **ptr, uint32_t *row_bytes)
bool texture_map(texture_t tex, void **ptr, uint32_t *linesize)
{
struct gs_texture_2d *tex2d = (struct gs_texture_2d*)tex;
@ -183,8 +183,8 @@ bool texture_map(texture_t tex, void **ptr, uint32_t *row_bytes)
gl_bind_buffer(GL_PIXEL_UNPACK_BUFFER, 0);
*row_bytes = tex2d->width * gs_get_format_bpp(tex->format) / 8;
*row_bytes = (*row_bytes + 3) & 0xFFFFFFFC;
*linesize = tex2d->width * gs_get_format_bpp(tex->format) / 8;
*linesize = (*linesize + 3) & 0xFFFFFFFC;
return true;
fail:

View File

@ -138,7 +138,7 @@ struct gs_exports {
uint32_t (*texture_getheight)(texture_t tex);
enum gs_color_format (*texture_getcolorformat)(texture_t tex);
bool (*texture_map)(texture_t tex, void **ptr,
uint32_t *row_bytes);
uint32_t *linesize);
void (*texture_unmap)(texture_t tex);
bool (*texture_isrect)(texture_t tex);
@ -158,7 +158,7 @@ struct gs_exports {
enum gs_color_format (*stagesurface_getcolorformat)(
stagesurf_t stagesurf);
bool (*stagesurface_map)(stagesurf_t stagesurf, const void **data,
uint32_t *row_bytes);
uint32_t *linesize);
void (*stagesurface_unmap)(stagesurf_t stagesurf);
void (*zstencil_destroy)(zstencil_t zstencil);

View File

@ -782,39 +782,39 @@ void gs_viewport_pop(void)
da_pop_back(thread_graphics->viewport_stack);
}
void texture_setimage(texture_t tex, const void *data, uint32_t row_bytes,
void texture_setimage(texture_t tex, const void *data, uint32_t linesize,
bool flip)
{
void *ptr;
uint32_t row_bytes_out;
uint32_t linesize_out;
uint32_t row_copy;
int32_t height = (int32_t)texture_getheight(tex);
int32_t y;
if (!texture_map(tex, &ptr, &row_bytes_out))
if (!texture_map(tex, &ptr, &linesize_out))
return;
row_copy = (row_bytes < row_bytes_out) ? row_bytes : row_bytes_out;
row_copy = (linesize < linesize_out) ? linesize : linesize_out;
if (flip) {
for (y = height-1; y >= 0; y--)
memcpy((uint8_t*)ptr + (uint32_t)y * row_bytes_out,
(uint8_t*)data + (uint32_t)y * row_bytes,
memcpy((uint8_t*)ptr + (uint32_t)y * linesize_out,
(uint8_t*)data + (uint32_t)y * linesize,
row_copy);
} else if (row_bytes == row_bytes_out) {
} else if (linesize == linesize_out) {
memcpy(ptr, data, row_copy * height);
} else {
for (y = 0; y < height; y++)
memcpy((uint8_t*)ptr + (uint32_t)y * row_bytes_out,
(uint8_t*)data + (uint32_t)y * row_bytes,
memcpy((uint8_t*)ptr + (uint32_t)y * linesize_out,
(uint8_t*)data + (uint32_t)y * linesize,
row_copy);
}
}
void cubetexture_setimage(texture_t cubetex, uint32_t side, const void *data,
uint32_t row_bytes, bool invert)
uint32_t linesize, bool invert)
{
/* TODO */
}
@ -1433,10 +1433,10 @@ enum gs_color_format texture_getcolorformat(texture_t tex)
return graphics->exports.texture_getcolorformat(tex);
}
bool texture_map(texture_t tex, void **ptr, uint32_t *row_bytes)
bool texture_map(texture_t tex, void **ptr, uint32_t *linesize)
{
graphics_t graphics = thread_graphics;
return graphics->exports.texture_map(tex, ptr, row_bytes);
return graphics->exports.texture_map(tex, ptr, linesize);
}
void texture_unmap(texture_t tex)
@ -1527,10 +1527,10 @@ enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf)
}
bool stagesurface_map(stagesurf_t stagesurf, const void **data,
uint32_t *row_bytes)
uint32_t *linesize)
{
graphics_t graphics = thread_graphics;
return graphics->exports.stagesurface_map(stagesurf, data, row_bytes);
return graphics->exports.stagesurface_map(stagesurf, data, linesize);
}
void stagesurface_unmap(stagesurf_t stagesurf)

View File

@ -515,9 +515,9 @@ EXPORT void gs_viewport_push(void);
EXPORT void gs_viewport_pop(void);
EXPORT void texture_setimage(texture_t tex, const void *data,
uint32_t row_bytes, bool invert);
uint32_t linesize, bool invert);
EXPORT void cubetexture_setimage(texture_t cubetex, uint32_t side,
const void *data, uint32_t row_bytes, bool invert);
const void *data, uint32_t linesize, bool invert);
EXPORT void gs_perspective(float fovy, float aspect, float znear, float zfar);
@ -640,7 +640,7 @@ EXPORT void texture_destroy(texture_t tex);
EXPORT uint32_t texture_getwidth(texture_t tex);
EXPORT uint32_t texture_getheight(texture_t tex);
EXPORT enum gs_color_format texture_getcolorformat(texture_t tex);
EXPORT bool texture_map(texture_t tex, void **ptr, uint32_t *row_bytes);
EXPORT bool texture_map(texture_t tex, void **ptr, uint32_t *linesize);
EXPORT void texture_unmap(texture_t tex);
/** special-case function (GL only) - specifies whether the texture is a
* GL_TEXTURE_RECTANGLE type, which doesn't use normalized texture
@ -662,7 +662,7 @@ EXPORT uint32_t stagesurface_getwidth(stagesurf_t stagesurf);
EXPORT uint32_t stagesurface_getheight(stagesurf_t stagesurf);
EXPORT enum gs_color_format stagesurface_getcolorformat(stagesurf_t stagesurf);
EXPORT bool stagesurface_map(stagesurf_t stagesurf, const void **data,
uint32_t *row_bytes);
uint32_t *linesize);
EXPORT void stagesurface_unmap(stagesurf_t stagesurf);
EXPORT void zstencil_destroy(zstencil_t zstencil);

View File

@ -15,6 +15,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
#include <math.h>
#include "../util/threading.h"
#include "../util/darray.h"
#include "../util/circlebuf.h"
@ -22,6 +24,10 @@
#include "audio-io.h"
/* #define DEBUG_AUDIO */
#define nop() do {int invalid = 0;} while(0)
struct audio_input {
struct audio_convert_info conversion;
void (*callback)(void *param, const struct audio_data *data);
@ -90,35 +96,63 @@ static inline void audio_output_removeline(struct audio_output *audio,
audio_line_destroy_data(line);
}
static inline uint32_t time_to_frames(audio_t audio, uint64_t offset)
/* ------------------------------------------------------------------------- */
/* the following functions are used to calculate frame offsets based upon
* timestamps. this will actually work accurately as long as you handle the
* values correctly */
static inline double ts_to_frames(audio_t audio, uint64_t ts)
{
double audio_offset_d = (double)offset;
double audio_offset_d = (double)ts;
audio_offset_d /= 1000000000.0;
audio_offset_d *= (double)audio->info.samples_per_sec;
return (uint32_t)audio_offset_d;
return audio_offset_d;
}
static inline size_t time_to_bytes(audio_t audio, uint64_t offset)
static inline double positive_round(double val)
{
return time_to_frames(audio, offset) * audio->block_size;
return floor(val+0.5);
}
static size_t ts_diff_frames(audio_t audio, uint64_t ts1, uint64_t ts2)
{
double diff = ts_to_frames(audio, ts1) - ts_to_frames(audio, ts2);
return (size_t)positive_round(diff);
}
static size_t ts_diff_bytes(audio_t audio, uint64_t ts1, uint64_t ts2)
{
return ts_diff_frames(audio, ts1, ts2) * audio->block_size;
}
/* unless the value is 3+ hours worth of frames, this won't overflow */
static inline uint64_t conv_frames_to_time(audio_t audio, uint32_t frames)
{
return (uint64_t)frames * 1000000000ULL /
(uint64_t)audio->info.samples_per_sec;
}
/* ------------------------------------------------------------------------- */
static inline void clear_excess_audio_data(struct audio_line *line,
uint64_t size)
uint64_t prev_time)
{
size_t size = ts_diff_bytes(line->audio, prev_time,
line->base_timestamp);
blog(LOG_WARNING, "Excess audio data for audio line '%s', somehow "
"audio data went back in time by %lu bytes. "
"prev_time: %llu, line->base_timestamp: %llu",
line->name, (uint32_t)size,
prev_time, line->base_timestamp);
for (size_t i = 0; i < line->audio->planes; i++) {
size_t clear_size = (size > line->buffers[i].size) ?
(size_t)size : line->buffers[i].size;
circlebuf_pop_front(&line->buffers[i], NULL, clear_size);
}
blog(LOG_WARNING, "Excess audio data for audio line '%s', somehow "
"audio data went back in time by %llu bytes",
line->name, size);
}
static inline uint64_t min_uint64(uint64_t a, uint64_t b)
@ -126,31 +160,35 @@ static inline uint64_t min_uint64(uint64_t a, uint64_t b)
return a < b ? a : b;
}
static inline void mix_audio_line(struct audio_output *audio,
static inline size_t min_size(size_t a, size_t b)
{
return a < b ? a : b;
}
/* TODO: this just overwrites. handle actual mixing */
static inline bool mix_audio_line(struct audio_output *audio,
struct audio_line *line, size_t size, uint64_t timestamp)
{
/* TODO: this just overwrites. handle actual mixing */
if (!line->buffers[0].size) {
if (!line->alive)
audio_output_removeline(audio, line);
return;
}
size_t time_offset = time_to_bytes(audio,
line->base_timestamp - timestamp);
size_t time_offset = ts_diff_bytes(audio,
line->base_timestamp, timestamp);
if (time_offset > size)
return;
return false;
size -= time_offset;
#ifdef DEBUG_AUDIO
blog(LOG_DEBUG, "shaved off %lu bytes", size);
#endif
for (size_t i = 0; i < audio->planes; i++) {
size_t pop_size;
pop_size = (size_t)min_uint64(size, line->buffers[i].size);
size_t pop_size = min_size(size, line->buffers[i].size);
circlebuf_pop_front(&line->buffers[i],
audio->mix_buffers[i].array + time_offset,
pop_size);
}
return true;
}
static inline void do_audio_output(struct audio_output *audio,
@ -172,34 +210,61 @@ static inline void do_audio_output(struct audio_output *audio,
pthread_mutex_unlock(&audio->input_mutex);
}
static void mix_and_output(struct audio_output *audio, uint64_t audio_time,
static uint64_t mix_and_output(struct audio_output *audio, uint64_t audio_time,
uint64_t prev_time)
{
struct audio_line *line = audio->first_line;
uint64_t time_offset = audio_time - prev_time;
uint32_t frames = time_to_frames(audio, time_offset);
uint32_t frames = (uint32_t)ts_diff_frames(audio, audio_time,
prev_time);
size_t bytes = frames * audio->block_size;
#ifdef DEBUG_AUDIO
blog(LOG_DEBUG, "audio_time: %llu, prev_time: %llu, bytes: %lu",
audio_time, prev_time, bytes);
#endif
/* return an adjusted audio_time according to the amount
* of data that was sampled to ensure seamless transmission */
audio_time = prev_time + conv_frames_to_time(audio, frames);
/* resize and clear mix buffers */
for (size_t i = 0; i < audio->planes; i++) {
da_resize(audio->mix_buffers[i], bytes);
memset(audio->mix_buffers[i].array, 0, bytes);
}
/* mix audio lines */
while (line) {
struct audio_line *next = line->next;
/* if line marked for removal, destroy and move to the next */
if (!line->buffers[0].size) {
if (!line->alive) {
audio_output_removeline(audio, line);
line = next;
continue;
}
}
pthread_mutex_lock(&line->mutex);
if (line->buffers[0].size && line->base_timestamp < prev_time) {
clear_excess_audio_data(line,
prev_time - line->base_timestamp);
clear_excess_audio_data(line, prev_time);
line->base_timestamp = prev_time;
}
mix_audio_line(audio, line, bytes, prev_time);
line->base_timestamp = audio_time;
if (mix_audio_line(audio, line, bytes, prev_time))
line->base_timestamp = audio_time;
pthread_mutex_unlock(&line->mutex);
line = next;
}
/* output */
do_audio_output(audio, prev_time, frames);
return audio_time;
}
/* sample audio 40 times a second */
@ -218,8 +283,8 @@ static void *audio_thread(void *param)
pthread_mutex_lock(&audio->line_mutex);
audio_time = os_gettime_ns() - buffer_time;
mix_and_output(audio, audio_time, prev_time);
prev_time = audio_time;
audio_time = mix_and_output(audio, audio_time, prev_time);
prev_time = audio_time;
pthread_mutex_unlock(&audio->line_mutex);
}
@ -530,11 +595,19 @@ static void audio_line_place_data_pos(struct audio_line *line,
}
}
static inline void audio_line_place_data(struct audio_line *line,
void audio_line_place_data(struct audio_line *line,
const struct audio_data *data)
{
uint64_t time_offset = data->timestamp - line->base_timestamp;
size_t pos = time_to_bytes(line->audio, time_offset);
size_t pos = ts_diff_bytes(line->audio, data->timestamp,
line->base_timestamp);
#ifdef DEBUG_AUDIO
blog(LOG_DEBUG, "data->timestamp: %llu, line->base_timestamp: %llu, "
"pos: %lu, bytes: %lu, buf size: %lu",
data->timestamp, line->base_timestamp, pos,
data->frames * line->audio->block_size,
line->buffers[0].size);
#endif
audio_line_place_data_pos(line, data, pos);
}
@ -547,8 +620,11 @@ void audio_line_output(audio_line_t line, const struct audio_data *data)
pthread_mutex_lock(&line->mutex);
if (!line->buffers[0].size) {
line->base_timestamp = data->timestamp;
audio_line_place_data_pos(line, data, 0);
/* XXX: not entirely sure if this is the wisest course of
* action in all circumstances */
line->base_timestamp = data->timestamp -
line->audio->info.buffer_ms * 1000000;
audio_line_place_data(line, data);
} else if (line->base_timestamp <= data->timestamp) {
audio_line_place_data(line, data);

View File

@ -77,8 +77,8 @@ static inline uint64_t convert_speaker_layout(enum speaker_layout layout)
return 0;
}
audio_resampler_t audio_resampler_create(struct resample_info *dst,
struct resample_info *src)
audio_resampler_t audio_resampler_create(const struct resample_info *dst,
const struct resample_info *src)
{
struct audio_resampler *rs = bmalloc(sizeof(struct audio_resampler));
int errcode;

View File

@ -33,8 +33,8 @@ struct resample_info {
enum speaker_layout speakers;
};
EXPORT audio_resampler_t audio_resampler_create(struct resample_info *dst,
struct resample_info *src);
EXPORT audio_resampler_t audio_resampler_create(const struct resample_info *dst,
const struct resample_info *src);
EXPORT void audio_resampler_destroy(audio_resampler_t resampler);
EXPORT bool audio_resampler_resample(audio_resampler_t resampler,

View File

@ -86,10 +86,10 @@ static inline void pack_chroma_2plane(uint8_t *u_plane, uint8_t *v_plane,
}
void compress_uyvx_to_i420(
const uint8_t *input, uint32_t in_row_bytes,
const uint8_t *input, uint32_t in_linesize,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_row_bytes[])
uint8_t *output[], const uint32_t out_linesize[])
{
uint8_t *lum_plane = output[0];
uint8_t *u_plane = output[1];
@ -100,19 +100,19 @@ void compress_uyvx_to_i420(
__m128i uv_mask = _mm_set1_epi16(0x00FF);
for (y = start_y; y < end_y; y += 2) {
uint32_t y_pos = y * in_row_bytes;
uint32_t chroma_y_pos = (y>>1) * out_row_bytes[1];
uint32_t lum_y_pos = y * out_row_bytes[0];
uint32_t y_pos = y * in_linesize;
uint32_t chroma_y_pos = (y>>1) * out_linesize[1];
uint32_t lum_y_pos = y * out_linesize[0];
uint32_t x;
for (x = 0; x < width; x += 4) {
const uint8_t *img = input + y_pos + x*4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + out_row_bytes[0];
uint32_t lum_pos1 = lum_pos0 + out_linesize[0];
__m128i line1 = _mm_load_si128((const __m128i*)img);
__m128i line2 = _mm_load_si128(
(const __m128i*)(img + in_row_bytes));
(const __m128i*)(img + in_linesize));
pack_lum(lum_plane, lum_pos0, lum_pos1,
line1, line2, lum_mask);
@ -124,10 +124,10 @@ void compress_uyvx_to_i420(
}
void compress_uyvx_to_nv12(
const uint8_t *input, uint32_t in_row_bytes,
const uint8_t *input, uint32_t in_linesize,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_row_bytes[])
uint8_t *output[], const uint32_t out_linesize[])
{
uint8_t *lum_plane = output[0];
uint8_t *chroma_plane = output[1];
@ -137,19 +137,19 @@ void compress_uyvx_to_nv12(
__m128i uv_mask = _mm_set1_epi16(0x00FF);
for (y = start_y; y < end_y; y += 2) {
uint32_t y_pos = y * in_row_bytes;
uint32_t chroma_y_pos = (y>>1) * out_row_bytes[1];
uint32_t lum_y_pos = y * out_row_bytes[0];
uint32_t y_pos = y * in_linesize;
uint32_t chroma_y_pos = (y>>1) * out_linesize[1];
uint32_t lum_y_pos = y * out_linesize[0];
uint32_t x;
for (x = 0; x < width; x += 4) {
const uint8_t *img = input + y_pos + x*4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + out_row_bytes[0];
uint32_t lum_pos1 = lum_pos0 + out_linesize[0];
__m128i line1 = _mm_load_si128((const __m128i*)img);
__m128i line2 = _mm_load_si128(
(const __m128i*)(img + in_row_bytes));
(const __m128i*)(img + in_linesize));
pack_lum(lum_plane, lum_pos0, lum_pos1,
line1, line2, lum_mask);
@ -160,10 +160,10 @@ void compress_uyvx_to_nv12(
}
void decompress_420(
const uint8_t *const input[], const uint32_t in_row_bytes[],
const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes)
uint8_t *output, uint32_t out_linesize)
{
uint32_t start_y_d2 = start_y/2;
uint32_t width_d2 = width/2;
@ -171,16 +171,16 @@ void decompress_420(
uint32_t y;
for (y = start_y_d2; y < height_d2; y++) {
const uint8_t *chroma0 = input[1] + y * in_row_bytes[1];
const uint8_t *chroma1 = input[2] + y * in_row_bytes[2];
const uint8_t *chroma0 = input[1] + y * in_linesize[1];
const uint8_t *chroma1 = input[2] + y * in_linesize[2];
register const uint8_t *lum0, *lum1;
register uint32_t *output0, *output1;
uint32_t x;
lum0 = input[0] + y * 2*width;
lum1 = lum0 + width;
output0 = (uint32_t*)(output + y * 2 * in_row_bytes[0]);
output1 = (uint32_t*)((uint8_t*)output0 + in_row_bytes[0]);
output0 = (uint32_t*)(output + y * 2 * in_linesize[0]);
output1 = (uint32_t*)((uint8_t*)output0 + in_linesize[0]);
for (x = 0; x < width_d2; x++) {
uint32_t out;
@ -196,10 +196,10 @@ void decompress_420(
}
void decompress_nv12(
const uint8_t *const input[], const uint32_t in_row_bytes[],
const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes)
uint8_t *output, uint32_t out_linesize)
{
uint32_t start_y_d2 = start_y/2;
uint32_t width_d2 = width/2;
@ -212,11 +212,11 @@ void decompress_nv12(
register uint32_t *output0, *output1;
uint32_t x;
chroma = (const uint16_t*)(input[1] + y * in_row_bytes[1]);
lum0 = input[0] + y*2 * in_row_bytes[0];
lum1 = lum0 + in_row_bytes[0];
output0 = (uint32_t*)(output + y*2 * out_row_bytes);
output1 = (uint32_t*)((uint8_t*)output0 + out_row_bytes);
chroma = (const uint16_t*)(input[1] + y * in_linesize[1]);
lum0 = input[0] + y*2 * in_linesize[0];
lum1 = lum0 + in_linesize[0];
output0 = (uint32_t*)(output + y*2 * out_linesize);
output1 = (uint32_t*)((uint8_t*)output0 + out_linesize);
for (x = 0; x < width_d2; x++) {
uint32_t out = *(chroma++) << 8;
@ -231,10 +231,10 @@ void decompress_nv12(
}
void decompress_422(
const uint8_t *input, uint32_t in_row_bytes,
const uint8_t *input, uint32_t in_linesize,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes,
uint8_t *output, uint32_t out_linesize,
bool leading_lum)
{
uint32_t width_d2 = width >> 1;
@ -246,9 +246,9 @@ void decompress_422(
if (leading_lum) {
for (y = start_y; y < end_y; y++) {
input32 = (const uint32_t*)(input + y*in_row_bytes);
input32 = (const uint32_t*)(input + y*in_linesize);
input32_end = input32 + width_d2;
output32 = (uint32_t*)(output + y*out_row_bytes);
output32 = (uint32_t*)(output + y*out_linesize);
while(input32 < input32_end) {
register uint32_t dw = *input32;
@ -264,9 +264,9 @@ void decompress_422(
}
} else {
for (y = start_y; y < end_y; y++) {
input32 = (const uint32_t*)(input + y*in_row_bytes);
input32 = (const uint32_t*)(input + y*in_linesize);
input32_end = input32 + width_d2;
output32 = (uint32_t*)(output + y*out_row_bytes);
output32 = (uint32_t*)(output + y*out_linesize);
while (input32 < input32_end) {
register uint32_t dw = *input32;

View File

@ -28,34 +28,34 @@ extern "C" {
*/
EXPORT void compress_uyvx_to_i420(
const uint8_t *input, uint32_t in_row_bytes,
const uint8_t *input, uint32_t in_linesize,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_row_bytes[]);
uint8_t *output[], const uint32_t out_linesize[]);
EXPORT void compress_uyvx_to_nv12(
const uint8_t *input, uint32_t in_row_bytes,
const uint8_t *input, uint32_t in_linesize,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_row_bytes[]);
uint8_t *output[], const uint32_t out_linesize[]);
EXPORT void decompress_nv12(
const uint8_t *const input[], const uint32_t in_row_bytes[],
const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes);
uint8_t *output, uint32_t out_linesize);
EXPORT void decompress_420(
const uint8_t *const input[], const uint32_t in_row_bytes[],
const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes);
uint8_t *output, uint32_t out_linesize);
EXPORT void decompress_422(
const uint8_t *input, uint32_t in_row_bytes,
const uint8_t *input, uint32_t in_linesize,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes,
uint8_t *output, uint32_t out_linesize,
bool leading_lum);
#ifdef __cplusplus

View File

@ -55,14 +55,10 @@ struct video_output {
static inline void video_swapframes(struct video_output *video)
{
pthread_mutex_lock(&video->data_mutex);
if (video->new_frame) {
video->cur_frame = video->next_frame;
video->new_frame = false;
}
pthread_mutex_unlock(&video->data_mutex);
}
static inline void video_output_cur_frame(struct video_output *video)
@ -75,28 +71,10 @@ static inline void video_output_cur_frame(struct video_output *video)
pthread_mutex_lock(&video->input_mutex);
/* TEST CODE */
/*static struct video_frame frame = {0};
if (!frame.data[0]) {
frame.data[0] = bmalloc(width * height);
frame.data[1] = bmalloc((width/2) * (height/2));
frame.data[2] = bmalloc((width/2) * (height/2));
frame.row_size[0] = width;
frame.row_size[1] = width/2;
frame.row_size[2] = width/2;
}
compress_uyvx_to_i420(
video->cur_frame.data[0], video->cur_frame.row_size[0],
width, height, 0, height,
(uint8_t**)frame.data, (uint32_t*)frame.row_size);*/
/* TODO: conversion */
for (size_t i = 0; i < video->inputs.num; i++) {
struct video_input *input = video->inputs.array+i;
input->callback(input->param, &video->cur_frame);//&frame);
input->callback(input->param, &video->cur_frame);
}
pthread_mutex_unlock(&video->input_mutex);
@ -115,8 +93,13 @@ static void *video_thread(void *param)
/* wait another half a frame, swap and output frames */
os_sleepto_ns(cur_time += (video->frame_time/2));
pthread_mutex_lock(&video->data_mutex);
video_swapframes(video);
video_output_cur_frame(video);
pthread_mutex_unlock(&video->data_mutex);
}
return NULL;
@ -217,7 +200,6 @@ void video_output_connect(video_t video,
input.conversion.format = video->info.format;
input.conversion.width = video->info.width;
input.conversion.height = video->info.height;
input.conversion.row_align = 1;
}
da_push_back(video->inputs, &input);

View File

@ -43,8 +43,6 @@ enum video_format {
VIDEO_FORMAT_UYVY,
/* packed uncompressed formats */
VIDEO_FORMAT_YUVX,
VIDEO_FORMAT_UYVX,
VIDEO_FORMAT_RGBA,
VIDEO_FORMAT_BGRA,
VIDEO_FORMAT_BGRX,
@ -52,7 +50,7 @@ enum video_format {
struct video_frame {
const uint8_t *data[MAX_VIDEO_PLANES];
uint32_t row_size[MAX_VIDEO_PLANES];
uint32_t linesize[MAX_VIDEO_PLANES];
uint64_t timestamp;
};
@ -70,9 +68,27 @@ struct video_convert_info {
enum video_format format;
uint32_t width;
uint32_t height;
uint32_t row_align;
};
static inline bool format_is_yuv(enum video_format format)
{
switch (format) {
case VIDEO_FORMAT_I420:
case VIDEO_FORMAT_NV12:
case VIDEO_FORMAT_YVYU:
case VIDEO_FORMAT_YUY2:
case VIDEO_FORMAT_UYVY:
return true;
case VIDEO_FORMAT_NONE:
case VIDEO_FORMAT_RGBA:
case VIDEO_FORMAT_BGRA:
case VIDEO_FORMAT_BGRX:
return false;
}
return false;
}
#define VIDEO_OUTPUT_SUCCESS 0
#define VIDEO_OUTPUT_INVALIDPARAM -1
#define VIDEO_OUTPUT_FAIL -2

View File

@ -59,6 +59,7 @@ struct obs_core_video {
bool textures_rendered[NUM_TEXTURES];
bool textures_output[NUM_TEXTURES];
bool textures_copied[NUM_TEXTURES];
struct source_frame convert_frames[NUM_TEXTURES];
effect_t default_effect;
stagesurf_t mapped_surface;
int cur_texture;

View File

@ -183,12 +183,18 @@ fail:
#define ALIGN_SIZE(size, align) \
size = (((size)+(align-1)) & (~(align-1)))
static void alloc_frame_data(struct source_frame *frame,
/* messy code alarm */
void source_frame_init(struct source_frame *frame,
enum video_format format, uint32_t width, uint32_t height)
{
size_t size;
size_t offsets[MAX_VIDEO_PLANES];
int alignment = base_get_alignment();
memset(offsets, 0, sizeof(offsets));
frame->format = format;
frame->width = width;
frame->height = height;
switch (format) {
case VIDEO_FORMAT_NONE:
@ -196,68 +202,53 @@ static void alloc_frame_data(struct source_frame *frame,
case VIDEO_FORMAT_I420:
size = width * height;
ALIGN_SIZE(size, 32);
ALIGN_SIZE(size, alignment);
offsets[0] = size;
size += (width/2) * (height/2);
ALIGN_SIZE(size, 32);
ALIGN_SIZE(size, alignment);
offsets[1] = size;
size += (width/2) * (height/2);
ALIGN_SIZE(size, 32);
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size);
frame->data[1] = (uint8_t*)frame->data[0] + offsets[0];
frame->data[2] = (uint8_t*)frame->data[0] + offsets[1];
frame->row_bytes[0] = width;
frame->row_bytes[1] = width/2;
frame->row_bytes[2] = width/2;
frame->linesize[0] = width;
frame->linesize[1] = width/2;
frame->linesize[2] = width/2;
break;
case VIDEO_FORMAT_NV12:
size = width * height;
ALIGN_SIZE(size, 32);
ALIGN_SIZE(size, alignment);
offsets[0] = size;
size += (width/2) * (height/2) * 2;
ALIGN_SIZE(size, 32);
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size);
frame->data[1] = (uint8_t*)frame->data[0] + offsets[0];
frame->row_bytes[0] = width;
frame->row_bytes[1] = width;
frame->linesize[0] = width;
frame->linesize[1] = width;
break;
case VIDEO_FORMAT_YVYU:
case VIDEO_FORMAT_YUY2:
case VIDEO_FORMAT_UYVY:
size = width * height * 2;
ALIGN_SIZE(size, 32);
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size);
frame->row_bytes[0] = width*2;
frame->linesize[0] = width*2;
break;
case VIDEO_FORMAT_YUVX:
case VIDEO_FORMAT_UYVX:
case VIDEO_FORMAT_RGBA:
case VIDEO_FORMAT_BGRA:
case VIDEO_FORMAT_BGRX:
size = width * height * 4;
ALIGN_SIZE(size, 32);
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size);
frame->row_bytes[0] = width*4;
frame->linesize[0] = width*4;
break;
}
}
struct source_frame *source_frame_alloc(enum video_format format,
uint32_t width, uint32_t height)
{
struct source_frame *frame = bmalloc(sizeof(struct source_frame));
memset(frame, 0, sizeof(struct source_frame));
alloc_frame_data(frame, format, width, height);
frame->format = format;
frame->width = width;
frame->height = height;
return frame;
}
static void obs_source_destroy(obs_source_t source)
{
size_t i;
@ -385,14 +376,14 @@ void obs_source_video_tick(obs_source_t source, float seconds)
source->callbacks.video_tick(source->data, seconds);
}
/* unless the value is 3+ hours worth of frames, this won't overflow */
static inline uint64_t conv_frames_to_time(obs_source_t source, size_t frames)
{
const struct audio_output_info *info;
double sps_to_ns;
info = audio_output_getinfo(obs->audio.audio);
sps_to_ns = 1000000000.0 / (double)info->samples_per_sec;
return (uint64_t)((double)frames * sps_to_ns);
return (uint64_t)frames * 1000000000ULL /
(uint64_t)info->samples_per_sec;
}
/* maximum "direct" timestamp variance in nanoseconds */
@ -452,6 +443,7 @@ static void source_output_audio_line(obs_source_t source,
in.timestamp += source->timing_adjust;
in.volume = source->volume;
audio_line_output(source->audio_line, &in);
}
@ -495,8 +487,6 @@ static inline enum convert_type get_convert_type(enum video_format format)
return CONVERT_422_U;
case VIDEO_FORMAT_NONE:
case VIDEO_FORMAT_YUVX:
case VIDEO_FORMAT_UYVX:
case VIDEO_FORMAT_RGBA:
case VIDEO_FORMAT_BGRA:
case VIDEO_FORMAT_BGRX:
@ -506,61 +496,40 @@ static inline enum convert_type get_convert_type(enum video_format format)
return CONVERT_NONE;
}
static inline bool is_yuv(enum video_format format)
{
switch (format) {
case VIDEO_FORMAT_I420:
case VIDEO_FORMAT_NV12:
case VIDEO_FORMAT_YVYU:
case VIDEO_FORMAT_YUY2:
case VIDEO_FORMAT_UYVY:
case VIDEO_FORMAT_YUVX:
case VIDEO_FORMAT_UYVX:
return true;
case VIDEO_FORMAT_NONE:
case VIDEO_FORMAT_RGBA:
case VIDEO_FORMAT_BGRA:
case VIDEO_FORMAT_BGRX:
return false;
}
return false;
}
static bool upload_frame(texture_t tex, const struct source_frame *frame)
{
void *ptr;
uint32_t row_bytes;
uint32_t linesize;
enum convert_type type = get_convert_type(frame->format);
if (type == CONVERT_NONE) {
texture_setimage(tex, frame->data[0], frame->row_bytes[0],
texture_setimage(tex, frame->data[0], frame->linesize[0],
false);
return true;
}
if (!texture_map(tex, &ptr, &row_bytes))
if (!texture_map(tex, &ptr, &linesize))
return false;
if (type == CONVERT_420)
decompress_420(frame->data, frame->row_bytes,
decompress_420(frame->data, frame->linesize,
frame->width, frame->height, 0, frame->height,
ptr, row_bytes);
ptr, linesize);
else if (type == CONVERT_NV12)
decompress_nv12(frame->data, frame->row_bytes,
decompress_nv12(frame->data, frame->linesize,
frame->width, frame->height, 0, frame->height,
ptr, row_bytes);
ptr, linesize);
else if (type == CONVERT_422_Y)
decompress_422(frame->data[0], frame->row_bytes[0],
decompress_422(frame->data[0], frame->linesize[0],
frame->width, frame->height, 0, frame->height,
ptr, row_bytes, true);
ptr, linesize, true);
else if (type == CONVERT_422_U)
decompress_422(frame->data[0], frame->row_bytes[0],
decompress_422(frame->data[0], frame->linesize[0],
frame->width, frame->height, 0, frame->height,
ptr, row_bytes, false);
ptr, linesize, false);
texture_unmap(tex);
return true;
@ -569,7 +538,7 @@ static bool upload_frame(texture_t tex, const struct source_frame *frame)
static void obs_source_draw_texture(texture_t tex, struct source_frame *frame)
{
effect_t effect = obs->video.default_effect;
bool yuv = is_yuv(frame->format);
bool yuv = format_is_yuv(frame->format);
const char *type = yuv ? "DrawMatrix" : "Draw";
technique_t tech;
eparam_t param;
@ -792,10 +761,10 @@ static inline struct source_frame *filter_async_video(obs_source_t source,
static inline void copy_frame_data_line(struct source_frame *dst,
const struct source_frame *src, uint32_t plane, uint32_t y)
{
uint32_t pos_src = y * src->row_bytes[plane];
uint32_t pos_dst = y * dst->row_bytes[plane];
uint32_t bytes = dst->row_bytes[plane] < src->row_bytes[plane] ?
dst->row_bytes[plane] : src->row_bytes[plane];
uint32_t pos_src = y * src->linesize[plane];
uint32_t pos_dst = y * dst->linesize[plane];
uint32_t bytes = dst->linesize[plane] < src->linesize[plane] ?
dst->linesize[plane] : src->linesize[plane];
memcpy(dst->data[plane] + pos_dst, src->data[plane] + pos_src, bytes);
}
@ -803,12 +772,12 @@ static inline void copy_frame_data_line(struct source_frame *dst,
static inline void copy_frame_data_plane(struct source_frame *dst,
const struct source_frame *src, uint32_t plane, uint32_t lines)
{
if (dst->row_bytes != src->row_bytes)
if (dst->linesize[plane] != src->linesize[plane])
for (uint32_t y = 0; y < lines; y++)
copy_frame_data_line(dst, src, plane, y);
else
memcpy(dst->data[plane], src->data[plane],
dst->row_bytes[plane] * lines);
dst->linesize[plane] * lines);
}
static void copy_frame_data(struct source_frame *dst,
@ -834,8 +803,6 @@ static void copy_frame_data(struct source_frame *dst,
case VIDEO_FORMAT_YUY2:
case VIDEO_FORMAT_UYVY:
case VIDEO_FORMAT_NONE:
case VIDEO_FORMAT_YUVX:
case VIDEO_FORMAT_UYVX:
case VIDEO_FORMAT_RGBA:
case VIDEO_FORMAT_BGRA:
case VIDEO_FORMAT_BGRX:
@ -847,7 +814,7 @@ static inline struct source_frame *cache_video(obs_source_t source,
const struct source_frame *frame)
{
/* TODO: use an actual cache */
struct source_frame *new_frame = source_frame_alloc(frame->format,
struct source_frame *new_frame = source_frame_create(frame->format,
frame->width, frame->height);
copy_frame_data(new_frame, frame);

View File

@ -18,6 +18,7 @@
#include "obs.h"
#include "obs-internal.h"
#include "graphics/vec4.h"
#include "media-io/format-conversion.h"
static void tick_sources(uint64_t cur_time, uint64_t *last_time)
{
@ -93,6 +94,8 @@ static inline void render_displays(void)
if (!obs->data.valid)
return;
gs_entercontext(obs_graphics());
/* render extra displays/swaps */
pthread_mutex_lock(&obs->data.displays_mutex);
@ -103,10 +106,15 @@ static inline void render_displays(void)
/* render main display */
render_display(NULL);
gs_leavecontext();
}
static inline void set_render_size(uint32_t width, uint32_t height)
{
gs_enable_depthtest(false);
gs_setcullmode(GS_NEITHER);
gs_ortho(0.0f, (float)width, 0.0f, (float)height, -100.0f, 100.0f);
gs_setviewport(0, 0, width, height);
}
@ -169,9 +177,9 @@ static inline void render_output_texture(struct obs_core_video *video,
/* TODO: replace with programmable code */
const float mat_val[16] =
{
0.256788f, 0.504129f, 0.097906f, 0.062745f,
-0.148223f, -0.290993f, 0.439216f, 0.501961f,
0.439216f, -0.367788f, -0.071427f, 0.501961f,
-0.100644f, -0.338572f, 0.439216f, 0.501961f,
0.182586f, 0.614231f, 0.062007f, 0.062745f,
0.439216f, -0.398942f, -0.040274f, 0.501961f,
0.000000f, 0.000000f, 0.000000f, 1.000000f
};
@ -222,22 +230,66 @@ static inline void render_video(struct obs_core_video *video, int cur_texture,
gs_endscene();
}
static inline void output_video(struct obs_core_video *video, int cur_texture,
int prev_texture, uint64_t timestamp)
/* TODO: replace with more optimal conversion */
static inline bool download_frame(struct obs_core_video *video, int cur_texture,
int prev_texture, struct video_frame *frame)
{
stagesurf_t surface = video->copy_surfaces[prev_texture];
struct video_frame frame;
if (!video->textures_copied[prev_texture])
return;
return false;
memset(&frame, 0, sizeof(struct video_frame));
frame.timestamp = timestamp;
if (!stagesurface_map(surface, &frame->data[0], &frame->linesize[0]))
return false;
if (stagesurface_map(surface, &frame.data[0], &frame.row_size[0])) {
video->mapped_surface = surface;
video_output_frame(video->video, &frame);
video->mapped_surface = surface;
return true;
}
static bool convert_frame(struct obs_core_video *video,
struct video_frame *frame,
const struct video_output_info *info, int cur_texture)
{
struct source_frame *new_frame = &video->convert_frames[cur_texture];
if (info->format == VIDEO_FORMAT_I420) {
compress_uyvx_to_i420(
frame->data[0], frame->linesize[0],
info->width, info->height,
0, info->height,
new_frame->data, new_frame->linesize);
} else if (info->format == VIDEO_FORMAT_NV12) {
compress_uyvx_to_nv12(
frame->data[0], frame->linesize[0],
info->width, info->height,
0, info->height,
new_frame->data, new_frame->linesize);
} else {
blog(LOG_WARNING, "convert_frame: unsupported texture format");
return false;
}
for (size_t i = 0; i < MAX_VIDEO_PLANES; i++) {
frame->data[i] = new_frame->data[i];
frame->linesize[i] = new_frame->linesize[i];
}
return true;
}
static inline void output_video_data(struct obs_core_video *video,
struct video_frame *frame, int cur_texture)
{
const struct video_output_info *info;
info = video_output_getinfo(video->video);
if (format_is_yuv(info->format))
if (!convert_frame(video, frame, info, cur_texture))
return;
video_output_frame(video->video, frame);
}
static inline void output_frame(uint64_t timestamp)
@ -245,9 +297,21 @@ static inline void output_frame(uint64_t timestamp)
struct obs_core_video *video = &obs->video;
int cur_texture = video->cur_texture;
int prev_texture = cur_texture == 0 ? NUM_TEXTURES-1 : cur_texture-1;
struct video_frame frame;
bool frame_ready;
memset(&frame, 0, sizeof(struct video_frame));
frame.timestamp = timestamp;
gs_entercontext(obs_graphics());
render_video(video, cur_texture, prev_texture);
output_video(video, cur_texture, prev_texture, timestamp);
frame_ready = download_frame(video, cur_texture, prev_texture, &frame);
gs_leavecontext();
if (frame_ready)
output_video_data(video, &frame, cur_texture);
if (++video->cur_texture == NUM_TEXTURES)
video->cur_texture = 0;
@ -260,13 +324,12 @@ void *obs_video_thread(void *param)
while (video_output_wait(obs->video.video)) {
uint64_t cur_time = video_gettime(obs->video.video);
gs_entercontext(obs_graphics());
tick_sources(cur_time, &last_time);
render_displays();
output_frame(cur_time);
gs_leavecontext();
}
return NULL;

View File

@ -51,6 +51,7 @@ static inline void make_video_info(struct video_output_info *vi,
static bool obs_init_textures(struct obs_video_info *ovi)
{
struct obs_core_video *video = &obs->video;
bool yuv = format_is_yuv(ovi->output_format);
size_t i;
for (i = 0; i < NUM_TEXTURES; i++) {
@ -74,6 +75,11 @@ static bool obs_init_textures(struct obs_video_info *ovi)
if (!video->output_textures[i])
return false;
if (yuv)
source_frame_init(&video->convert_frames[i],
ovi->output_format,
ovi->output_width, ovi->output_height);
}
return true;
@ -178,6 +184,7 @@ static void obs_free_graphics()
stagesurface_destroy(video->copy_surfaces[i]);
texture_destroy(video->render_textures[i]);
texture_destroy(video->output_textures[i]);
source_frame_free(&video->convert_frames[i]);
video->copy_surfaces[i] = NULL;
video->render_textures[i] = NULL;

View File

@ -116,7 +116,7 @@ struct source_audio {
struct source_frame {
uint8_t *data[MAX_VIDEO_PLANES];
uint32_t row_bytes[MAX_VIDEO_PLANES];
uint32_t linesize[MAX_VIDEO_PLANES];
uint32_t width;
uint32_t height;
uint64_t timestamp;
@ -126,15 +126,6 @@ struct source_frame {
bool flip;
};
EXPORT struct source_frame *source_frame_alloc(enum video_format format,
uint32_t width, uint32_t height);
static inline void source_frame_destroy(struct source_frame *frame)
{
bfree(frame->data[0]);
bfree(frame);
}
enum packet_priority {
PACKET_PRIORITY_DISPOSABLE,
PACKET_PRIORITY_LOW,
@ -597,6 +588,39 @@ EXPORT obs_service_t obs_service_create(const char *service,
EXPORT void obs_service_destroy(obs_service_t service);
/* ------------------------------------------------------------------------- */
/* Source frame allocation functions */
EXPORT void source_frame_init(struct source_frame *frame,
enum video_format format, uint32_t width, uint32_t height);
static inline void source_frame_free(struct source_frame *frame)
{
if (frame) {
bfree(frame->data[0]);
memset(frame, 0, sizeof(struct source_frame));
}
}
static inline struct source_frame *source_frame_create(
enum video_format format, uint32_t width, uint32_t height)
{
struct source_frame *frame;
frame = (struct source_frame*)bmalloc(sizeof(struct source_frame));
memset(frame, 0, sizeof(struct source_frame));
source_frame_init(frame, format, width, height);
return frame;
}
static inline void source_frame_destroy(struct source_frame *frame)
{
if (frame) {
bfree(frame->data[0]);
bfree(frame);
}
}
#ifdef __cplusplus
}
#endif

View File

@ -24,11 +24,12 @@
* http://www.ffmpeg.org/
*/
#define ALIGNMENT 16
#define ALIGNMENT 32
#if defined(_WIN32) && !defined(_WIN64)
/* TODO: use memalign for non-windows systems */
#if defined(_WIN32)
#define ALIGNED_MALLOC 1
#elif !defined(__LP64__)
#else
#define ALIGNMENT_HACK 1
#endif
@ -130,6 +131,11 @@ uint64_t bnum_allocs(void)
return num_allocs;
}
int base_get_alignment(void)
{
return ALIGNMENT;
}
void *bmemdup(const void *ptr, size_t size)
{
void *out = bmalloc(size);

View File

@ -37,6 +37,8 @@ EXPORT void *bmalloc(size_t size);
EXPORT void *brealloc(void *ptr, size_t size);
EXPORT void bfree(void *ptr);
EXPORT int base_get_alignment(void);
EXPORT uint64_t bnum_allocs(void);
EXPORT void *bmemdup(const void *ptr, size_t size);

View File

@ -34,11 +34,11 @@ void *os_dlopen(const char *path)
{
struct dstr dylib_name;
dstr_init_copy(&dylib_name, path);
if(!dstr_find(&dylib_name, ".so"))
if (!dstr_find(&dylib_name, ".so"))
dstr_cat(&dylib_name, ".so");
void *res = dlopen(dylib_name.array, RTLD_LAZY);
if(!res)
if (!res)
blog(LOG_ERROR, "os_dlopen(%s->%s): %s\n",
path, dylib_name.array, dlerror());
@ -56,23 +56,26 @@ void os_dlclose(void *module)
dlclose(module);
}
void os_sleepto_ns(uint64_t time_target)
bool os_sleepto_ns(uint64_t time_target)
{
uint64_t current = os_gettime_ns();
if(time_target < current)
return;
if (time_target < current)
return false;
time_target -= current;
struct timespec req,
remain;
struct timespec req, remain;
memset(&req, 0, sizeof(req));
memset(&remain, 0, sizeof(remain));
req.tv_sec = time_target/1000000000;
req.tv_nsec = time_target%1000000000;
while(nanosleep(&req, &remain))
{
while (nanosleep(&req, &remain)) {
req = remain;
memset(&remain, 0, sizeof(remain));
}
return true;
}
void os_sleep_ms(uint32_t duration)

View File

@ -29,11 +29,11 @@ void *os_dlopen(const char *path)
{
struct dstr dylib_name;
dstr_init_copy(&dylib_name, path);
if(!dstr_find(&dylib_name, ".so"))
if (!dstr_find(&dylib_name, ".so"))
dstr_cat(&dylib_name, ".so");
void *res = dlopen(dylib_name.array, RTLD_LAZY);
if(!res)
if (!res)
blog(LOG_ERROR, "os_dlopen(%s->%s): %s\n",
path, dylib_name.array, dlerror());
@ -51,23 +51,26 @@ void os_dlclose(void *module)
dlclose(module);
}
void os_sleepto_ns(uint64_t time_target)
bool os_sleepto_ns(uint64_t time_target)
{
uint64_t current = os_gettime_ns();
if(time_target < current)
return;
if (time_target < current)
return false;
time_target -= current;
struct timespec req,
remain;
struct timespec req, remain;
memset(&req, 0, sizeof(req));
memset(&remain, 0, sizeof(remain));
req.tv_sec = time_target/1000000000;
req.tv_nsec = time_target%1000000000;
while(nanosleep(&req, &remain))
{
while (nanosleep(&req, &remain)) {
req = remain;
memset(&remain, 0, sizeof(remain));
}
return true;
}
void os_sleep_ms(uint32_t duration)

View File

@ -82,13 +82,13 @@ void os_dlclose(void *module)
FreeLibrary(module);
}
void os_sleepto_ns(uint64_t time_target)
bool os_sleepto_ns(uint64_t time_target)
{
uint64_t t = os_gettime_ns();
uint32_t milliseconds;
if (t >= time_target)
return;
return false;
milliseconds = (uint32_t)((time_target - t)/1000000);
if (milliseconds > 1)
@ -97,7 +97,7 @@ void os_sleepto_ns(uint64_t time_target)
for (;;) {
t = os_gettime_ns();
if (t >= time_target)
return;
return true;
#if 1
Sleep(1);

View File

@ -57,7 +57,12 @@ EXPORT void *os_dlopen(const char *path);
EXPORT void *os_dlsym(void *module, const char *func);
EXPORT void os_dlclose(void *module);
EXPORT void os_sleepto_ns(uint64_t time_target);
/**
* Sleeps to a specific time (in nanoseconds). Doesn't have to be super
* accurate in terms of actual slept time because the target time is ensured.
* Returns false if already at or past target time.
*/
EXPORT bool os_sleepto_ns(uint64_t time_target);
EXPORT void os_sleep_ms(uint32_t duration);
EXPORT uint64_t os_gettime_ns(void);

View File

@ -62,9 +62,9 @@ void OBSBasic::OBSInit()
/* TODO: this is a test */
obs_load_module("test-input");
/*obs_load_module("obs-ffmpeg");
obs_load_module("obs-ffmpeg");
obs_output_t output = obs_output_create("ffmpeg_output", "test",
/*obs_output_t output = obs_output_create("ffmpeg_output", "test",
NULL);
obs_output_start(output);*/
@ -281,14 +281,9 @@ bool OBSBasic::InitGraphics()
"Video", "OutputCX");
ovi.output_height = (uint32_t)config_get_uint(GetGlobalConfig(),
"Video", "OutputCY");
ovi.output_format = VIDEO_FORMAT_RGBA;
ovi.output_format = VIDEO_FORMAT_I420;
ovi.adapter = 0;
//#ifdef __WXGTK__
/* Ugly hack for GTK, I'm hoping this can be avoided eventually... */
// gtk_widget_realize(previewPanel->GetHandle());
//#endif
QTToGSWindow(ui->preview, ovi.window);
//required to make opengl display stuff on osx(?)

View File

@ -1,5 +1,5 @@
/******************************************************************************
Copyright (C) 2013 by Hugh Bailey <obs.jim@gmail.com>
Copyright (C) 2014 by Hugh Bailey <obs.jim@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -19,7 +19,7 @@
#include "obs-ffmpeg-output.h"
/* TODO: remove these later */
#define FILENAME_TODO "D:\\test.mp4"
#define FILENAME_TODO "D:\\test.avi"
#define SPS_TODO 44100
/* NOTE: much of this stuff is test stuff that was more or less copied from
@ -35,8 +35,6 @@ static inline enum AVPixelFormat obs_to_ffmpeg_video_format(
case VIDEO_FORMAT_YVYU: return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_YUY2: return AV_PIX_FMT_YUYV422;
case VIDEO_FORMAT_UYVY: return AV_PIX_FMT_UYVY422;
case VIDEO_FORMAT_YUVX: return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_UYVX: return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_RGBA: return AV_PIX_FMT_RGBA;
case VIDEO_FORMAT_BGRA: return AV_PIX_FMT_BGRA;
case VIDEO_FORMAT_BGRX: return AV_PIX_FMT_BGRA;
@ -174,6 +172,16 @@ static bool open_audio_codec(struct ffmpeg_data *data,
return false;
}
data->frame_size = context->frame_size ? context->frame_size : 1024;
ret = av_samples_alloc(data->samples, NULL, context->channels,
data->frame_size, context->sample_fmt, 0);
if (ret < 0) {
blog(LOG_ERROR, "Failed to create audio buffer: %s",
av_err2str(ret));
return false;
}
return true;
}
@ -253,6 +261,9 @@ static void close_video(struct ffmpeg_data *data)
static void close_audio(struct ffmpeg_data *data)
{
for (size_t i = 0; i < MAX_AUDIO_PLANES; i++)
circlebuf_free(&data->excess_frames[i]);
av_freep(&data->samples[0]);
avcodec_close(data->audio->codec);
av_frame_free(&data->aframe);
@ -356,7 +367,7 @@ static inline void copy_data(AVPicture *pic, const struct video_frame *frame,
int height)
{
for (int plane = 0; plane < YUV420_PLANES; plane++) {
int frame_rowsize = (int)frame->row_size[plane];
int frame_rowsize = (int)frame->linesize[plane];
int pic_rowsize = pic->linesize[plane];
int bytes = frame_rowsize < pic_rowsize ?
frame_rowsize : pic_rowsize;
@ -384,7 +395,7 @@ static void receive_video(void *param, const struct video_frame *frame)
av_init_packet(&packet);
if (context->pix_fmt != AV_PIX_FMT_YUV420P)
sws_scale(data->swscale, frame->data, frame->row_size,
sws_scale(data->swscale, frame->data, frame->linesize,
0, context->height, data->dst_picture.data,
data->dst_picture.linesize);
else
@ -431,30 +442,28 @@ static void receive_video(void *param, const struct video_frame *frame)
data->total_frames++;
}
static void receive_audio(void *param, const struct audio_data *frame)
static inline void encode_audio(struct ffmpeg_data *data,
struct AVCodecContext *context, size_t block_size)
{
struct ffmpeg_output *output = param;
struct ffmpeg_data *data = &output->ff_data;
AVCodecContext *context = data->audio->codec;
AVPacket packet = {0};
int channels = (int)audio_output_channels(obs_audio());
size_t planes = audio_output_planes(obs_audio());
int ret, got_packet;
size_t total_size = data->frame_size * block_size * context->channels;
data->aframe->nb_samples = frame->frames;
data->aframe->nb_samples = data->frame_size;
data->aframe->pts = av_rescale_q(data->total_samples,
(AVRational){1, context->sample_rate},
context->time_base);
if (!data->samples[0])
av_samples_alloc(data->samples, NULL, channels,
frame->frames, context->sample_fmt, 0);
for (size_t i = 0; i < planes; i++) {
/* TODO */
ret = avcodec_fill_audio_frame(data->aframe, context->channels,
context->sample_fmt, data->samples[0],
total_size, 1);
if (ret < 0) {
blog(LOG_ERROR, "receive_audio: avcodec_fill_audio_frame "
"failed: %s", av_err2str(ret));
return;
}
data->total_samples += frame->frames;
data->total_samples += data->frame_size;
ret = avcodec_encode_audio2(context, &packet, data->aframe,
&got_packet);
@ -479,6 +488,30 @@ static void receive_audio(void *param, const struct audio_data *frame)
av_err2str(ret));
}
static void receive_audio(void *param, const struct audio_data *frame)
{
struct ffmpeg_output *output = param;
struct ffmpeg_data *data = &output->ff_data;
AVCodecContext *context = data->audio->codec;
size_t planes = audio_output_planes(obs_audio());
size_t block_size = audio_output_blocksize(obs_audio());
size_t frame_size_bytes = (size_t)data->frame_size * block_size;
for (size_t i = 0; i < planes; i++)
circlebuf_push_back(&data->excess_frames[i], frame->data[0],
frame->frames * block_size);
while (data->excess_frames[0].size >= frame_size_bytes) {
for (size_t i = 0; i < planes; i++)
circlebuf_pop_front(&data->excess_frames[i],
data->samples[i], frame_size_bytes);
encode_audio(data, context, block_size);
}
}
bool ffmpeg_output_start(struct ffmpeg_output *data)
{
video_t video = obs_video();
@ -502,10 +535,9 @@ bool ffmpeg_output_start(struct ffmpeg_output *data)
vci.format = VIDEO_FORMAT_I420;
vci.width = 0;
vci.height = 0;
vci.row_align = 1;
//video_output_connect(video, &vci, receive_video, data);
//audio_output_connect(audio, &aci, receive_audio, data);
video_output_connect(video, &vci, receive_video, data);
audio_output_connect(audio, &aci, receive_audio, data);
data->active = true;
return true;

View File

@ -18,6 +18,7 @@
#pragma once
#include <util/c99defs.h>
#include <util/circlebuf.h>
#include <media-io/audio-io.h>
#include <media-io/video-io.h>
@ -34,8 +35,10 @@ struct ffmpeg_data {
AVPicture dst_picture;
AVFrame *vframe;
int frame_size;
int total_frames;
struct circlebuf excess_frames[MAX_AUDIO_PLANES];
uint8_t *samples[MAX_AUDIO_PLANES];
AVFrame *aframe;
int total_samples;

View File

@ -1,28 +1,31 @@
#include <math.h>
#include "test-sinewave.h"
/* middle C */
const double rate = 261.63/48000.0;
#define M_PI 3.1415926535897932384626433832795
#define M_PI_X2 M_PI*2
static void *sinewave_thread(void *pdata)
{
struct sinewave_data *swd = pdata;
uint64_t last_time = os_gettime_ns();
uint64_t ts = 0;
double sin_val = 0.0;
double cos_val = 0.0;
uint8_t bytes[480];
while (event_try(&swd->event) == EAGAIN) {
os_sleepto_ns(last_time += 10000000);
if (!os_sleepto_ns(last_time += 10000000))
last_time = os_gettime_ns();
for (size_t i = 0; i < 480; i++) {
sin_val += rate * M_PI;
if (sin_val > M_PI)
sin_val -= M_PI;
cos_val += rate * M_PI_X2;
if (cos_val > M_PI_X2)
cos_val -= M_PI_X2;
double wave = sin(sin_val);
bytes[i] = (uint8_t)(wave * 255.0);
double wave = cos(cos_val);
bytes[i] = (uint8_t)((wave+1.0)*0.5 * 255.0);
}
struct source_audio data;