clang-format: Apply formatting

Code submissions have continually suffered from formatting
inconsistencies that have to be addressed over and over. Using
clang-format simplifies this by making the code style consistent and by
allowing formatting to be automated, so that maintainers can focus on
the code itself rather than on its formatting.
Author: jp9000
Date:   2019-06-22 22:13:45 -07:00
Parent: 53615ee10f
Commit: f53df7da64

567 changed files with 34068 additions and 32903 deletions
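
The style itself can be enforced mechanically: clang-format reads its
rules from a .clang-format file kept alongside the sources. Below is a
minimal illustrative sketch of such a file, matching the tab-indented,
80-column style visible in the hunks that follow; the specific option
values are assumptions for illustration, not necessarily this
repository's actual configuration.

# .clang-format -- illustrative sketch only; option values are assumptions
BasedOnStyle: LLVM        # start from the LLVM preset
UseTab: ForIndentation    # indent with tabs, align with spaces
IndentWidth: 8            # 8-column indentation, as in the hunks below
TabWidth: 8
ColumnLimit: 80           # wrap long declarations and calls at 80 columns
BreakBeforeBraces: Linux  # function braces on their own line

With such a file in place, running clang-format -i on a source file
rewrites it in place, and the same invocation can be hooked into editor
tooling or CI so formatting no longer needs to be reviewed by hand.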


@@ -31,11 +31,14 @@ extern profiler_name_store_t *obs_get_profiler_name_store(void);
/* #define DEBUG_AUDIO */
#define nop() do {int invalid = 0;} while(0)
#define nop() \
do { \
int invalid = 0; \
} while (0)
struct audio_input {
struct audio_convert_info conversion;
audio_resampler_t *resampler;
audio_resampler_t *resampler;
audio_output_callback_t callback;
void *param;
@@ -52,20 +55,20 @@ struct audio_mix {
};
struct audio_output {
struct audio_output_info info;
size_t block_size;
size_t channels;
size_t planes;
struct audio_output_info info;
size_t block_size;
size_t channels;
size_t planes;
pthread_t thread;
os_event_t *stop_event;
pthread_t thread;
os_event_t *stop_event;
bool initialized;
bool initialized;
audio_input_callback_t input_cb;
void *input_param;
pthread_mutex_t input_mutex;
struct audio_mix mixes[MAX_AUDIO_MIXES];
audio_input_callback_t input_cb;
void *input_param;
pthread_mutex_t input_mutex;
struct audio_mix mixes[MAX_AUDIO_MIXES];
};
/* ------------------------------------------------------------------------- */
@@ -84,7 +87,7 @@ static inline double ts_to_frames(const audio_t *audio, uint64_t ts)
static inline double positive_round(double val)
{
return floor(val+0.5);
return floor(val + 0.5);
}
static int64_t ts_diff_frames(const audio_t *audio, uint64_t ts1, uint64_t ts2)
@@ -116,33 +119,32 @@ static inline size_t min_size(size_t a, size_t b)
#endif
static bool resample_audio_output(struct audio_input *input,
struct audio_data *data)
struct audio_data *data)
{
bool success = true;
if (input->resampler) {
uint8_t *output[MAX_AV_PLANES];
uint8_t *output[MAX_AV_PLANES];
uint32_t frames;
uint64_t offset;
memset(output, 0, sizeof(output));
success = audio_resampler_resample(input->resampler,
output, &frames, &offset,
(const uint8_t *const *)data->data,
data->frames);
success = audio_resampler_resample(
input->resampler, output, &frames, &offset,
(const uint8_t *const *)data->data, data->frames);
for (size_t i = 0; i < MAX_AV_PLANES; i++)
data->data[i] = output[i];
data->frames = frames;
data->frames = frames;
data->timestamp -= offset;
}
return success;
}
static inline void do_audio_output(struct audio_output *audio,
size_t mix_idx, uint64_t timestamp, uint32_t frames)
static inline void do_audio_output(struct audio_output *audio, size_t mix_idx,
uint64_t timestamp, uint32_t frames)
{
struct audio_mix *mix = &audio->mixes[mix_idx];
struct audio_data data;
@@ -150,10 +152,10 @@ static inline void do_audio_output(struct audio_output *audio,
pthread_mutex_lock(&audio->input_mutex);
for (size_t i = mix->inputs.num; i > 0; i--) {
struct audio_input *input = mix->inputs.array+(i-1);
struct audio_input *input = mix->inputs.array + (i - 1);
for (size_t i = 0; i < audio->planes; i++)
data.data[i] = (uint8_t*)mix->buffer[i];
data.data[i] = (uint8_t *)mix->buffer[i];
data.frames = frames;
data.timestamp = timestamp;
@@ -181,7 +183,7 @@ static inline void clamp_audio_output(struct audio_output *audio, size_t bytes)
while (mix_data < mix_end) {
float val = *mix_data;
val = (val > 1.0f) ? 1.0f : val;
val = (val > 1.0f) ? 1.0f : val;
val = (val < -1.0f) ? -1.0f : val;
*(mix_data++) = val;
}
@@ -189,8 +191,8 @@ static inline void clamp_audio_output(struct audio_output *audio, size_t bytes)
}
}
static void input_and_output(struct audio_output *audio,
uint64_t audio_time, uint64_t prev_time)
static void input_and_output(struct audio_output *audio, uint64_t audio_time,
uint64_t prev_time)
{
size_t bytes = AUDIO_OUTPUT_FRAMES * audio->block_size;
struct audio_output_data data[MAX_AUDIO_MIXES];
@@ -202,7 +204,7 @@ static void input_and_output(struct audio_output *audio,
#ifdef DEBUG_AUDIO
blog(LOG_DEBUG, "audio_time: %llu, prev_time: %llu, bytes: %lu",
audio_time, prev_time, bytes);
audio_time, prev_time, bytes);
#endif
/* get mixers */
@@ -217,8 +219,9 @@ static void input_and_output(struct audio_output *audio,
for (size_t mix_idx = 0; mix_idx < MAX_AUDIO_MIXES; mix_idx++) {
struct audio_mix *mix = &audio->mixes[mix_idx];
memset(mix->buffer[0], 0, AUDIO_OUTPUT_FRAMES *
MAX_AUDIO_CHANNELS * sizeof(float));
memset(mix->buffer[0], 0,
AUDIO_OUTPUT_FRAMES * MAX_AUDIO_CHANNELS *
sizeof(float));
for (size_t i = 0; i < audio->planes; i++)
data[mix_idx].data[i] = mix->buffer[i];
@@ -226,7 +229,7 @@ static void input_and_output(struct audio_output *audio,
/* get new audio data */
success = audio->input_cb(audio->input_param, prev_time, audio_time,
&new_ts, active_mixes, data);
&new_ts, active_mixes, data);
if (!success)
return;
@@ -246,15 +249,14 @@ static void *audio_thread(void *param)
uint64_t start_time = os_gettime_ns();
uint64_t prev_time = start_time;
uint64_t audio_time = prev_time;
uint32_t audio_wait_time =
(uint32_t)(audio_frames_to_ns(rate, AUDIO_OUTPUT_FRAMES) /
1000000);
uint32_t audio_wait_time = (uint32_t)(
audio_frames_to_ns(rate, AUDIO_OUTPUT_FRAMES) / 1000000);
os_set_thread_name("audio-io: audio thread");
const char *audio_thread_name =
profile_store_name(obs_get_profiler_name_store(),
"audio_thread(%s)", audio->info.name);
"audio_thread(%s)", audio->info.name);
while (os_event_try(audio->stop_event) == EAGAIN) {
uint64_t cur_time;
@@ -266,8 +268,8 @@ static void *audio_thread(void *param)
cur_time = os_gettime_ns();
while (audio_time <= cur_time) {
samples += AUDIO_OUTPUT_FRAMES;
audio_time = start_time +
audio_frames_to_ns(rate, samples);
audio_time =
start_time + audio_frames_to_ns(rate, samples);
input_and_output(audio, audio_time, prev_time);
prev_time = audio_time;
@@ -284,12 +286,12 @@ static void *audio_thread(void *param)
/* ------------------------------------------------------------------------- */
static size_t audio_get_input_idx(const audio_t *audio, size_t mix_idx,
audio_output_callback_t callback, void *param)
audio_output_callback_t callback, void *param)
{
const struct audio_mix *mix = &audio->mixes[mix_idx];
for (size_t i = 0; i < mix->inputs.num; i++) {
struct audio_input *input = mix->inputs.array+i;
struct audio_input *input = mix->inputs.array + i;
if (input->callback == callback && input->param == param)
return i;
@@ -299,27 +301,25 @@ static size_t audio_get_input_idx(const audio_t *audio, size_t mix_idx,
}
static inline bool audio_input_init(struct audio_input *input,
struct audio_output *audio)
struct audio_output *audio)
{
if (input->conversion.format != audio->info.format ||
if (input->conversion.format != audio->info.format ||
input->conversion.samples_per_sec != audio->info.samples_per_sec ||
input->conversion.speakers != audio->info.speakers) {
input->conversion.speakers != audio->info.speakers) {
struct resample_info from = {
.format = audio->info.format,
.format = audio->info.format,
.samples_per_sec = audio->info.samples_per_sec,
.speakers = audio->info.speakers
};
.speakers = audio->info.speakers};
struct resample_info to = {
.format = input->conversion.format,
.format = input->conversion.format,
.samples_per_sec = input->conversion.samples_per_sec,
.speakers = input->conversion.speakers
};
.speakers = input->conversion.speakers};
input->resampler = audio_resampler_create(&to, &from);
if (!input->resampler) {
blog(LOG_ERROR, "audio_input_init: Failed to "
"create resampler");
"create resampler");
return false;
}
} else {
@@ -330,12 +330,13 @@ static inline bool audio_input_init(struct audio_input *input,
}
bool audio_output_connect(audio_t *audio, size_t mi,
const struct audio_convert_info *conversion,
audio_output_callback_t callback, void *param)
const struct audio_convert_info *conversion,
audio_output_callback_t callback, void *param)
{
bool success = false;
if (!audio || mi >= MAX_AUDIO_MIXES) return false;
if (!audio || mi >= MAX_AUDIO_MIXES)
return false;
pthread_mutex_lock(&audio->input_mutex);
@@ -343,7 +344,7 @@ bool audio_output_connect(audio_t *audio, size_t mi,
struct audio_mix *mix = &audio->mixes[mi];
struct audio_input input;
input.callback = callback;
input.param = param;
input.param = param;
if (conversion) {
input.conversion = *conversion;
@@ -373,16 +374,17 @@ bool audio_output_connect(audio_t *audio, size_t mi,
}
void audio_output_disconnect(audio_t *audio, size_t mix_idx,
audio_output_callback_t callback, void *param)
audio_output_callback_t callback, void *param)
{
if (!audio || mix_idx >= MAX_AUDIO_MIXES) return;
if (!audio || mix_idx >= MAX_AUDIO_MIXES)
return;
pthread_mutex_lock(&audio->input_mutex);
size_t idx = audio_get_input_idx(audio, mix_idx, callback, param);
if (idx != DARRAY_INVALID) {
struct audio_mix *mix = &audio->mixes[mix_idx];
audio_input_free(mix->inputs.array+idx);
audio_input_free(mix->inputs.array + idx);
da_erase(mix->inputs, idx);
}
@@ -409,12 +411,12 @@ int audio_output_open(audio_t **audio, struct audio_output_info *info)
goto fail;
memcpy(&out->info, info, sizeof(struct audio_output_info));
out->channels = get_audio_channels(info->speakers);
out->planes = planar ? out->channels : 1;
out->input_cb = info->input_callback;
out->input_param= info->input_param;
out->channels = get_audio_channels(info->speakers);
out->planes = planar ? out->channels : 1;
out->input_cb = info->input_callback;
out->input_param = info->input_param;
out->block_size = (planar ? 1 : out->channels) *
get_audio_bytes_per_channel(info->format);
get_audio_bytes_per_channel(info->format);
if (pthread_mutexattr_init(&attr) != 0)
goto fail;
@@ -452,7 +454,7 @@ void audio_output_close(audio_t *audio)
struct audio_mix *mix = &audio->mixes[mix_idx];
for (size_t i = 0; i < mix->inputs.num; i++)
audio_input_free(mix->inputs.array+i);
audio_input_free(mix->inputs.array + i);
da_free(mix->inputs);
}
@@ -468,7 +470,8 @@ const struct audio_output_info *audio_output_get_info(const audio_t *audio)
bool audio_output_active(const audio_t *audio)
{
if (!audio) return false;
if (!audio)
return false;
for (size_t mix_idx = 0; mix_idx < MAX_AUDIO_MIXES; mix_idx++) {
const struct audio_mix *mix = &audio->mixes[mix_idx];


@@ -25,13 +25,13 @@
extern "C" {
#endif
#define MAX_AUDIO_MIXES 6
#define MAX_AUDIO_CHANNELS 8
#define MAX_AUDIO_MIXES 6
#define MAX_AUDIO_CHANNELS 8
#define AUDIO_OUTPUT_FRAMES 1024
#define TOTAL_AUDIO_SIZE \
(MAX_AUDIO_MIXES * MAX_AUDIO_CHANNELS * \
AUDIO_OUTPUT_FRAMES * sizeof(float))
#define TOTAL_AUDIO_SIZE \
(MAX_AUDIO_MIXES * MAX_AUDIO_CHANNELS * AUDIO_OUTPUT_FRAMES * \
sizeof(float))
/*
* Base audio output component. Use this to create an audio output track
@@ -65,58 +65,67 @@ enum audio_format {
* https://trac.ffmpeg.org/wiki/AudioChannelManipulation
*/
enum speaker_layout {
SPEAKERS_UNKNOWN, /**< Unknown setting, fallback is stereo. */
SPEAKERS_MONO, /**< Channels: MONO */
SPEAKERS_STEREO, /**< Channels: FL, FR */
SPEAKERS_2POINT1, /**< Channels: FL, FR, LFE */
SPEAKERS_4POINT0, /**< Channels: FL, FR, FC, RC */
SPEAKERS_4POINT1, /**< Channels: FL, FR, FC, LFE, RC */
SPEAKERS_5POINT1, /**< Channels: FL, FR, FC, LFE, RL, RR */
SPEAKERS_7POINT1=8, /**< Channels: FL, FR, FC, LFE, RL, RR, SL, SR */
SPEAKERS_UNKNOWN, /**< Unknown setting, fallback is stereo. */
SPEAKERS_MONO, /**< Channels: MONO */
SPEAKERS_STEREO, /**< Channels: FL, FR */
SPEAKERS_2POINT1, /**< Channels: FL, FR, LFE */
SPEAKERS_4POINT0, /**< Channels: FL, FR, FC, RC */
SPEAKERS_4POINT1, /**< Channels: FL, FR, FC, LFE, RC */
SPEAKERS_5POINT1, /**< Channels: FL, FR, FC, LFE, RL, RR */
SPEAKERS_7POINT1 = 8, /**< Channels: FL, FR, FC, LFE, RL, RR, SL, SR */
};
struct audio_data {
uint8_t *data[MAX_AV_PLANES];
uint32_t frames;
uint64_t timestamp;
uint8_t *data[MAX_AV_PLANES];
uint32_t frames;
uint64_t timestamp;
};
struct audio_output_data {
float *data[MAX_AUDIO_CHANNELS];
float *data[MAX_AUDIO_CHANNELS];
};
typedef bool (*audio_input_callback_t)(void *param,
uint64_t start_ts, uint64_t end_ts, uint64_t *new_ts,
uint32_t active_mixers, struct audio_output_data *mixes);
typedef bool (*audio_input_callback_t)(void *param, uint64_t start_ts,
uint64_t end_ts, uint64_t *new_ts,
uint32_t active_mixers,
struct audio_output_data *mixes);
struct audio_output_info {
const char *name;
const char *name;
uint32_t samples_per_sec;
enum audio_format format;
uint32_t samples_per_sec;
enum audio_format format;
enum speaker_layout speakers;
audio_input_callback_t input_callback;
void *input_param;
void *input_param;
};
struct audio_convert_info {
uint32_t samples_per_sec;
enum audio_format format;
uint32_t samples_per_sec;
enum audio_format format;
enum speaker_layout speakers;
};
static inline uint32_t get_audio_channels(enum speaker_layout speakers)
{
switch (speakers) {
case SPEAKERS_MONO: return 1;
case SPEAKERS_STEREO: return 2;
case SPEAKERS_2POINT1: return 3;
case SPEAKERS_4POINT0: return 4;
case SPEAKERS_4POINT1: return 5;
case SPEAKERS_5POINT1: return 6;
case SPEAKERS_7POINT1: return 8;
case SPEAKERS_UNKNOWN: return 0;
case SPEAKERS_MONO:
return 1;
case SPEAKERS_STEREO:
return 2;
case SPEAKERS_2POINT1:
return 3;
case SPEAKERS_4POINT0:
return 4;
case SPEAKERS_4POINT1:
return 5;
case SPEAKERS_5POINT1:
return 6;
case SPEAKERS_7POINT1:
return 8;
case SPEAKERS_UNKNOWN:
return 0;
}
return 0;
@@ -169,23 +178,22 @@ static inline bool is_audio_planar(enum audio_format format)
}
static inline size_t get_audio_planes(enum audio_format format,
enum speaker_layout speakers)
enum speaker_layout speakers)
{
return (is_audio_planar(format) ? get_audio_channels(speakers) : 1);
}
static inline size_t get_audio_size(enum audio_format format,
enum speaker_layout speakers, uint32_t frames)
enum speaker_layout speakers,
uint32_t frames)
{
bool planar = is_audio_planar(format);
return (planar ? 1 : get_audio_channels(speakers)) *
get_audio_bytes_per_channel(format) *
frames;
get_audio_bytes_per_channel(format) * frames;
}
static inline uint64_t audio_frames_to_ns(size_t sample_rate,
uint64_t frames)
static inline uint64_t audio_frames_to_ns(size_t sample_rate, uint64_t frames)
{
util_uint128_t val;
val = util_mul64_64(frames, 1000000000ULL);
@@ -193,8 +201,7 @@ static inline uint64_t audio_frames_to_ns(size_t sample_rate,
return val.low;
}
static inline uint64_t ns_to_audio_frames(size_t sample_rate,
uint64_t frames)
static inline uint64_t ns_to_audio_frames(size_t sample_rate, uint64_t frames)
{
util_uint128_t val;
val = util_mul64_64(frames, sample_rate);
@@ -202,21 +209,22 @@ static inline uint64_t ns_to_audio_frames(size_t sample_rate,
return val.low;
}
#define AUDIO_OUTPUT_SUCCESS 0
#define AUDIO_OUTPUT_SUCCESS 0
#define AUDIO_OUTPUT_INVALIDPARAM -1
#define AUDIO_OUTPUT_FAIL -2
#define AUDIO_OUTPUT_FAIL -2
EXPORT int audio_output_open(audio_t **audio, struct audio_output_info *info);
EXPORT void audio_output_close(audio_t *audio);
typedef void (*audio_output_callback_t)(void *param, size_t mix_idx,
struct audio_data *data);
struct audio_data *data);
EXPORT bool audio_output_connect(audio_t *video, size_t mix_idx,
const struct audio_convert_info *conversion,
audio_output_callback_t callback, void *param);
const struct audio_convert_info *conversion,
audio_output_callback_t callback, void *param);
EXPORT void audio_output_disconnect(audio_t *video, size_t mix_idx,
audio_output_callback_t callback, void *param);
audio_output_callback_t callback,
void *param);
EXPORT bool audio_output_active(const audio_t *audio);
@@ -224,9 +232,8 @@ EXPORT size_t audio_output_get_block_size(const audio_t *audio);
EXPORT size_t audio_output_get_planes(const audio_t *audio);
EXPORT size_t audio_output_get_channels(const audio_t *audio);
EXPORT uint32_t audio_output_get_sample_rate(const audio_t *audio);
EXPORT const struct audio_output_info *audio_output_get_info(
const audio_t *audio);
EXPORT const struct audio_output_info *
audio_output_get_info(const audio_t *audio);
#ifdef __cplusplus
}


@@ -23,34 +23,43 @@
#include <libswresample/swresample.h>
struct audio_resampler {
struct SwrContext *context;
bool opened;
struct SwrContext *context;
bool opened;
uint32_t input_freq;
uint64_t input_layout;
uint32_t input_freq;
uint64_t input_layout;
enum AVSampleFormat input_format;
uint8_t *output_buffer[MAX_AV_PLANES];
uint64_t output_layout;
uint8_t *output_buffer[MAX_AV_PLANES];
uint64_t output_layout;
enum AVSampleFormat output_format;
int output_size;
uint32_t output_ch;
uint32_t output_freq;
uint32_t output_planes;
int output_size;
uint32_t output_ch;
uint32_t output_freq;
uint32_t output_planes;
};
static inline enum AVSampleFormat convert_audio_format(enum audio_format format)
{
switch (format) {
case AUDIO_FORMAT_UNKNOWN: return AV_SAMPLE_FMT_S16;
case AUDIO_FORMAT_U8BIT: return AV_SAMPLE_FMT_U8;
case AUDIO_FORMAT_16BIT: return AV_SAMPLE_FMT_S16;
case AUDIO_FORMAT_32BIT: return AV_SAMPLE_FMT_S32;
case AUDIO_FORMAT_FLOAT: return AV_SAMPLE_FMT_FLT;
case AUDIO_FORMAT_U8BIT_PLANAR: return AV_SAMPLE_FMT_U8P;
case AUDIO_FORMAT_16BIT_PLANAR: return AV_SAMPLE_FMT_S16P;
case AUDIO_FORMAT_32BIT_PLANAR: return AV_SAMPLE_FMT_S32P;
case AUDIO_FORMAT_FLOAT_PLANAR: return AV_SAMPLE_FMT_FLTP;
case AUDIO_FORMAT_UNKNOWN:
return AV_SAMPLE_FMT_S16;
case AUDIO_FORMAT_U8BIT:
return AV_SAMPLE_FMT_U8;
case AUDIO_FORMAT_16BIT:
return AV_SAMPLE_FMT_S16;
case AUDIO_FORMAT_32BIT:
return AV_SAMPLE_FMT_S32;
case AUDIO_FORMAT_FLOAT:
return AV_SAMPLE_FMT_FLT;
case AUDIO_FORMAT_U8BIT_PLANAR:
return AV_SAMPLE_FMT_U8P;
case AUDIO_FORMAT_16BIT_PLANAR:
return AV_SAMPLE_FMT_S16P;
case AUDIO_FORMAT_32BIT_PLANAR:
return AV_SAMPLE_FMT_S32P;
case AUDIO_FORMAT_FLOAT_PLANAR:
return AV_SAMPLE_FMT_FLTP;
}
/* shouldn't get here */
@@ -60,14 +69,22 @@ static inline enum AVSampleFormat convert_audio_format(enum audio_format format)
static inline uint64_t convert_speaker_layout(enum speaker_layout layout)
{
switch (layout) {
case SPEAKERS_UNKNOWN: return 0;
case SPEAKERS_MONO: return AV_CH_LAYOUT_MONO;
case SPEAKERS_STEREO: return AV_CH_LAYOUT_STEREO;
case SPEAKERS_2POINT1: return AV_CH_LAYOUT_SURROUND;
case SPEAKERS_4POINT0: return AV_CH_LAYOUT_4POINT0;
case SPEAKERS_4POINT1: return AV_CH_LAYOUT_4POINT1;
case SPEAKERS_5POINT1: return AV_CH_LAYOUT_5POINT1_BACK;
case SPEAKERS_7POINT1: return AV_CH_LAYOUT_7POINT1;
case SPEAKERS_UNKNOWN:
return 0;
case SPEAKERS_MONO:
return AV_CH_LAYOUT_MONO;
case SPEAKERS_STEREO:
return AV_CH_LAYOUT_STEREO;
case SPEAKERS_2POINT1:
return AV_CH_LAYOUT_SURROUND;
case SPEAKERS_4POINT0:
return AV_CH_LAYOUT_4POINT0;
case SPEAKERS_4POINT1:
return AV_CH_LAYOUT_4POINT1;
case SPEAKERS_5POINT1:
return AV_CH_LAYOUT_5POINT1_BACK;
case SPEAKERS_7POINT1:
return AV_CH_LAYOUT_7POINT1;
}
/* shouldn't get here */
@@ -75,26 +92,27 @@ static inline uint64_t convert_speaker_layout(enum speaker_layout layout)
}
audio_resampler_t *audio_resampler_create(const struct resample_info *dst,
const struct resample_info *src)
const struct resample_info *src)
{
struct audio_resampler *rs = bzalloc(sizeof(struct audio_resampler));
int errcode;
rs->opened = false;
rs->input_freq = src->samples_per_sec;
rs->input_layout = convert_speaker_layout(src->speakers);
rs->input_format = convert_audio_format(src->format);
rs->output_size = 0;
rs->output_ch = get_audio_channels(dst->speakers);
rs->output_freq = dst->samples_per_sec;
rs->opened = false;
rs->input_freq = src->samples_per_sec;
rs->input_layout = convert_speaker_layout(src->speakers);
rs->input_format = convert_audio_format(src->format);
rs->output_size = 0;
rs->output_ch = get_audio_channels(dst->speakers);
rs->output_freq = dst->samples_per_sec;
rs->output_layout = convert_speaker_layout(dst->speakers);
rs->output_format = convert_audio_format(dst->format);
rs->output_planes = is_audio_planar(dst->format) ? rs->output_ch : 1;
rs->context = swr_alloc_set_opts(NULL,
rs->output_layout, rs->output_format, dst->samples_per_sec,
rs->input_layout, rs->input_format, src->samples_per_sec,
0, NULL);
rs->context = swr_alloc_set_opts(NULL, rs->output_layout,
rs->output_format,
dst->samples_per_sec, rs->input_layout,
rs->input_format, src->samples_per_sec,
0, NULL);
if (!rs->context) {
blog(LOG_ERROR, "swr_alloc_set_opts failed");
@@ -104,23 +122,25 @@ audio_resampler_t *audio_resampler_create(const struct resample_info *dst,
if (rs->input_layout == AV_CH_LAYOUT_MONO && rs->output_ch > 1) {
const double matrix[MAX_AUDIO_CHANNELS][MAX_AUDIO_CHANNELS] = {
{1},
{1, 1},
{1, 1, 0},
{1, 1, 1, 1},
{1, 1, 1, 0, 1},
{1, 1, 1, 1, 1, 1},
{1, 1, 1, 0, 1, 1, 1},
{1, 1, 1, 0, 1, 1, 1, 1},
{1},
{1, 1},
{1, 1, 0},
{1, 1, 1, 1},
{1, 1, 1, 0, 1},
{1, 1, 1, 1, 1, 1},
{1, 1, 1, 0, 1, 1, 1},
{1, 1, 1, 0, 1, 1, 1, 1},
};
if (swr_set_matrix(rs->context, matrix[rs->output_ch - 1], 1) < 0)
blog(LOG_DEBUG, "swr_set_matrix failed for mono upmix\n");
if (swr_set_matrix(rs->context, matrix[rs->output_ch - 1], 1) <
0)
blog(LOG_DEBUG,
"swr_set_matrix failed for mono upmix\n");
}
errcode = swr_init(rs->context);
if (errcode != 0) {
blog(LOG_ERROR, "avresample_open failed: error code %d",
errcode);
errcode);
audio_resampler_destroy(rs);
return NULL;
}
@@ -140,20 +160,21 @@ void audio_resampler_destroy(audio_resampler_t *rs)
}
}
bool audio_resampler_resample(audio_resampler_t *rs,
uint8_t *output[], uint32_t *out_frames, uint64_t *ts_offset,
const uint8_t *const input[], uint32_t in_frames)
bool audio_resampler_resample(audio_resampler_t *rs, uint8_t *output[],
uint32_t *out_frames, uint64_t *ts_offset,
const uint8_t *const input[], uint32_t in_frames)
{
if (!rs) return false;
if (!rs)
return false;
struct SwrContext *context = rs->context;
int ret;
int64_t delay = swr_get_delay(context, rs->input_freq);
int estimated = (int)av_rescale_rnd(
delay + (int64_t)in_frames,
(int64_t)rs->output_freq, (int64_t)rs->input_freq,
AV_ROUND_UP);
int estimated = (int)av_rescale_rnd(delay + (int64_t)in_frames,
(int64_t)rs->output_freq,
(int64_t)rs->input_freq,
AV_ROUND_UP);
*ts_offset = (uint64_t)swr_get_delay(context, 1000000000);
@@ -163,14 +184,13 @@ bool audio_resampler_resample(audio_resampler_t *rs,
av_freep(&rs->output_buffer[0]);
av_samples_alloc(rs->output_buffer, NULL, rs->output_ch,
estimated, rs->output_format, 0);
estimated, rs->output_format, 0);
rs->output_size = estimated;
}
ret = swr_convert(context,
rs->output_buffer, rs->output_size,
(const uint8_t**)input, in_frames);
ret = swr_convert(context, rs->output_buffer, rs->output_size,
(const uint8_t **)input, in_frames);
if (ret < 0) {
blog(LOG_ERROR, "swr_convert failed: %d", ret);


@@ -28,18 +28,21 @@ struct audio_resampler;
typedef struct audio_resampler audio_resampler_t;
struct resample_info {
uint32_t samples_per_sec;
enum audio_format format;
uint32_t samples_per_sec;
enum audio_format format;
enum speaker_layout speakers;
};
EXPORT audio_resampler_t *audio_resampler_create(const struct resample_info *dst,
const struct resample_info *src);
EXPORT audio_resampler_t *
audio_resampler_create(const struct resample_info *dst,
const struct resample_info *src);
EXPORT void audio_resampler_destroy(audio_resampler_t *resampler);
EXPORT bool audio_resampler_resample(audio_resampler_t *resampler,
uint8_t *output[], uint32_t *out_frames, uint64_t *ts_offset,
const uint8_t *const input[], uint32_t in_frames);
uint8_t *output[], uint32_t *out_frames,
uint64_t *ts_offset,
const uint8_t *const input[],
uint32_t in_frames);
#ifdef __cplusplus
}


@@ -22,194 +22,191 @@
/* ...surprisingly, if I don't use a macro to force inlining, it causes the
* CPU usage to boost by a tremendous amount in debug builds. */
#define get_m128_32_0(val) (*((uint32_t*)&val))
#define get_m128_32_1(val) (*(((uint32_t*)&val)+1))
#define get_m128_32_0(val) (*((uint32_t *)&val))
#define get_m128_32_1(val) (*(((uint32_t *)&val) + 1))
#define pack_shift(lum_plane, lum_pos0, lum_pos1, line1, line2, mask, sh) \
do { \
__m128i pack_val = _mm_packs_epi32( \
_mm_srli_si128(_mm_and_si128(line1, mask), sh), \
_mm_srli_si128(_mm_and_si128(line2, mask), sh)); \
pack_val = _mm_packus_epi16(pack_val, pack_val); \
\
*(uint32_t*)(lum_plane+lum_pos0) = get_m128_32_0(pack_val); \
*(uint32_t*)(lum_plane+lum_pos1) = get_m128_32_1(pack_val); \
} while (false)
#define pack_shift(lum_plane, lum_pos0, lum_pos1, line1, line2, mask, sh) \
do { \
__m128i pack_val = _mm_packs_epi32( \
_mm_srli_si128(_mm_and_si128(line1, mask), sh), \
_mm_srli_si128(_mm_and_si128(line2, mask), sh)); \
pack_val = _mm_packus_epi16(pack_val, pack_val); \
\
*(uint32_t *)(lum_plane + lum_pos0) = get_m128_32_0(pack_val); \
*(uint32_t *)(lum_plane + lum_pos1) = get_m128_32_1(pack_val); \
} while (false)
#define pack_val(lum_plane, lum_pos0, lum_pos1, line1, line2, mask) \
do { \
__m128i pack_val = _mm_packs_epi32( \
_mm_and_si128(line1, mask), \
_mm_and_si128(line2, mask)); \
pack_val = _mm_packus_epi16(pack_val, pack_val); \
\
*(uint32_t*)(lum_plane+lum_pos0) = get_m128_32_0(pack_val); \
*(uint32_t*)(lum_plane+lum_pos1) = get_m128_32_1(pack_val); \
} while (false)
#define pack_val(lum_plane, lum_pos0, lum_pos1, line1, line2, mask) \
do { \
__m128i pack_val = \
_mm_packs_epi32(_mm_and_si128(line1, mask), \
_mm_and_si128(line2, mask)); \
pack_val = _mm_packus_epi16(pack_val, pack_val); \
\
*(uint32_t *)(lum_plane + lum_pos0) = get_m128_32_0(pack_val); \
*(uint32_t *)(lum_plane + lum_pos1) = get_m128_32_1(pack_val); \
} while (false)
#define pack_ch_1plane(uv_plane, chroma_pos, line1, line2, uv_mask) \
do { \
__m128i add_val = _mm_add_epi64( \
_mm_and_si128(line1, uv_mask), \
_mm_and_si128(line2, uv_mask)); \
__m128i avg_val = _mm_add_epi64( \
add_val, \
_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1))); \
avg_val = _mm_srai_epi16(avg_val, 2); \
avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
avg_val = _mm_packus_epi16(avg_val, avg_val); \
\
*(uint32_t*)(uv_plane+chroma_pos) = get_m128_32_0(avg_val); \
} while (false)
#define pack_ch_2plane(u_plane, v_plane, chroma_pos, line1, line2, uv_mask) \
do { \
uint32_t packed_vals; \
\
__m128i add_val = _mm_add_epi64( \
_mm_and_si128(line1, uv_mask), \
_mm_and_si128(line2, uv_mask)); \
__m128i avg_val = _mm_add_epi64( \
add_val, \
_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1))); \
avg_val = _mm_srai_epi16(avg_val, 2); \
avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
avg_val = _mm_shufflelo_epi16(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
avg_val = _mm_packus_epi16(avg_val, avg_val); \
\
packed_vals = get_m128_32_0(avg_val); \
\
*(uint16_t*)(u_plane+chroma_pos) = (uint16_t)(packed_vals); \
*(uint16_t*)(v_plane+chroma_pos) = (uint16_t)(packed_vals>>16); \
} while (false)
#define pack_ch_1plane(uv_plane, chroma_pos, line1, line2, uv_mask) \
do { \
__m128i add_val = \
_mm_add_epi64(_mm_and_si128(line1, uv_mask), \
_mm_and_si128(line2, uv_mask)); \
__m128i avg_val = _mm_add_epi64( \
add_val, \
_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1))); \
avg_val = _mm_srai_epi16(avg_val, 2); \
avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
avg_val = _mm_packus_epi16(avg_val, avg_val); \
\
*(uint32_t *)(uv_plane + chroma_pos) = get_m128_32_0(avg_val); \
} while (false)
#define pack_ch_2plane(u_plane, v_plane, chroma_pos, line1, line2, uv_mask) \
do { \
uint32_t packed_vals; \
\
__m128i add_val = \
_mm_add_epi64(_mm_and_si128(line1, uv_mask), \
_mm_and_si128(line2, uv_mask)); \
__m128i avg_val = _mm_add_epi64( \
add_val, \
_mm_shuffle_epi32(add_val, _MM_SHUFFLE(2, 3, 0, 1))); \
avg_val = _mm_srai_epi16(avg_val, 2); \
avg_val = _mm_shuffle_epi32(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
avg_val = \
_mm_shufflelo_epi16(avg_val, _MM_SHUFFLE(3, 1, 2, 0)); \
avg_val = _mm_packus_epi16(avg_val, avg_val); \
\
packed_vals = get_m128_32_0(avg_val); \
\
*(uint16_t *)(u_plane + chroma_pos) = (uint16_t)(packed_vals); \
*(uint16_t *)(v_plane + chroma_pos) = \
(uint16_t)(packed_vals >> 16); \
} while (false)
static FORCE_INLINE uint32_t min_uint32(uint32_t a, uint32_t b)
{
return a < b ? a : b;
}
void compress_uyvx_to_i420(
const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_linesize[])
void compress_uyvx_to_i420(const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y, uint8_t *output[],
const uint32_t out_linesize[])
{
uint8_t *lum_plane = output[0];
uint8_t *u_plane = output[1];
uint8_t *v_plane = output[2];
uint32_t width = min_uint32(in_linesize, out_linesize[0]);
uint8_t *lum_plane = output[0];
uint8_t *u_plane = output[1];
uint8_t *v_plane = output[2];
uint32_t width = min_uint32(in_linesize, out_linesize[0]);
uint32_t y;
__m128i lum_mask = _mm_set1_epi32(0x0000FF00);
__m128i uv_mask = _mm_set1_epi16(0x00FF);
__m128i uv_mask = _mm_set1_epi16(0x00FF);
for (y = start_y; y < end_y; y += 2) {
uint32_t y_pos = y * in_linesize;
uint32_t chroma_y_pos = (y>>1) * out_linesize[1];
uint32_t lum_y_pos = y * out_linesize[0];
uint32_t y_pos = y * in_linesize;
uint32_t chroma_y_pos = (y >> 1) * out_linesize[1];
uint32_t lum_y_pos = y * out_linesize[0];
uint32_t x;
for (x = 0; x < width; x += 4) {
const uint8_t *img = input + y_pos + x*4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + out_linesize[0];
const uint8_t *img = input + y_pos + x * 4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + out_linesize[0];
__m128i line1 = _mm_load_si128((const __m128i*)img);
__m128i line1 = _mm_load_si128((const __m128i *)img);
__m128i line2 = _mm_load_si128(
(const __m128i*)(img + in_linesize));
(const __m128i *)(img + in_linesize));
pack_shift(lum_plane, lum_pos0, lum_pos1,
line1, line2, lum_mask, 1);
pack_shift(lum_plane, lum_pos0, lum_pos1, line1, line2,
lum_mask, 1);
pack_ch_2plane(u_plane, v_plane,
chroma_y_pos + (x>>1),
line1, line2, uv_mask);
chroma_y_pos + (x >> 1), line1, line2,
uv_mask);
}
}
}
void compress_uyvx_to_nv12(
const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_linesize[])
void compress_uyvx_to_nv12(const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y, uint8_t *output[],
const uint32_t out_linesize[])
{
uint8_t *lum_plane = output[0];
uint8_t *lum_plane = output[0];
uint8_t *chroma_plane = output[1];
uint32_t width = min_uint32(in_linesize, out_linesize[0]);
uint32_t width = min_uint32(in_linesize, out_linesize[0]);
uint32_t y;
__m128i lum_mask = _mm_set1_epi32(0x0000FF00);
__m128i uv_mask = _mm_set1_epi16(0x00FF);
__m128i uv_mask = _mm_set1_epi16(0x00FF);
for (y = start_y; y < end_y; y += 2) {
uint32_t y_pos = y * in_linesize;
uint32_t chroma_y_pos = (y>>1) * out_linesize[1];
uint32_t lum_y_pos = y * out_linesize[0];
uint32_t y_pos = y * in_linesize;
uint32_t chroma_y_pos = (y >> 1) * out_linesize[1];
uint32_t lum_y_pos = y * out_linesize[0];
uint32_t x;
for (x = 0; x < width; x += 4) {
const uint8_t *img = input + y_pos + x*4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + out_linesize[0];
const uint8_t *img = input + y_pos + x * 4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + out_linesize[0];
__m128i line1 = _mm_load_si128((const __m128i*)img);
__m128i line1 = _mm_load_si128((const __m128i *)img);
__m128i line2 = _mm_load_si128(
(const __m128i*)(img + in_linesize));
(const __m128i *)(img + in_linesize));
pack_shift(lum_plane, lum_pos0, lum_pos1,
line1, line2, lum_mask, 1);
pack_ch_1plane(chroma_plane, chroma_y_pos + x,
line1, line2, uv_mask);
pack_shift(lum_plane, lum_pos0, lum_pos1, line1, line2,
lum_mask, 1);
pack_ch_1plane(chroma_plane, chroma_y_pos + x, line1,
line2, uv_mask);
}
}
}
void convert_uyvx_to_i444(
const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_linesize[])
void convert_uyvx_to_i444(const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y, uint8_t *output[],
const uint32_t out_linesize[])
{
uint8_t *lum_plane = output[0];
uint8_t *u_plane = output[1];
uint8_t *v_plane = output[2];
uint32_t width = min_uint32(in_linesize, out_linesize[0]);
uint8_t *lum_plane = output[0];
uint8_t *u_plane = output[1];
uint8_t *v_plane = output[2];
uint32_t width = min_uint32(in_linesize, out_linesize[0]);
uint32_t y;
__m128i lum_mask = _mm_set1_epi32(0x0000FF00);
__m128i u_mask = _mm_set1_epi32(0x000000FF);
__m128i v_mask = _mm_set1_epi32(0x00FF0000);
__m128i u_mask = _mm_set1_epi32(0x000000FF);
__m128i v_mask = _mm_set1_epi32(0x00FF0000);
for (y = start_y; y < end_y; y += 2) {
uint32_t y_pos = y * in_linesize;
uint32_t lum_y_pos = y * out_linesize[0];
uint32_t y_pos = y * in_linesize;
uint32_t lum_y_pos = y * out_linesize[0];
uint32_t x;
for (x = 0; x < width; x += 4) {
const uint8_t *img = input + y_pos + x*4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + out_linesize[0];
const uint8_t *img = input + y_pos + x * 4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + out_linesize[0];
__m128i line1 = _mm_load_si128((const __m128i*)img);
__m128i line1 = _mm_load_si128((const __m128i *)img);
__m128i line2 = _mm_load_si128(
(const __m128i*)(img + in_linesize));
(const __m128i *)(img + in_linesize));
pack_shift(lum_plane, lum_pos0, lum_pos1,
line1, line2, lum_mask, 1);
pack_val(u_plane, lum_pos0, lum_pos1,
line1, line2, u_mask);
pack_shift(v_plane, lum_pos0, lum_pos1,
line1, line2, v_mask, 2);
pack_shift(lum_plane, lum_pos0, lum_pos1, line1, line2,
lum_mask, 1);
pack_val(u_plane, lum_pos0, lum_pos1, line1, line2,
u_mask);
pack_shift(v_plane, lum_pos0, lum_pos1, line1, line2,
v_mask, 2);
}
}
}
void decompress_420(
const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_linesize)
void decompress_420(const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t start_y, uint32_t end_y, uint8_t *output,
uint32_t out_linesize)
{
uint32_t start_y_d2 = start_y/2;
uint32_t width_d2 = in_linesize[0]/2;
uint32_t height_d2 = end_y/2;
uint32_t start_y_d2 = start_y / 2;
uint32_t width_d2 = in_linesize[0] / 2;
uint32_t height_d2 = end_y / 2;
uint32_t y;
for (y = start_y_d2; y < height_d2; y++) {
@@ -221,8 +218,8 @@ void decompress_420(
lum0 = input[0] + y * 2 * in_linesize[0];
lum1 = lum0 + in_linesize[0];
output0 = (uint32_t*)(output + y * 2 * out_linesize);
output1 = (uint32_t*)((uint8_t*)output0 + out_linesize);
output0 = (uint32_t *)(output + y * 2 * out_linesize);
output1 = (uint32_t *)((uint8_t *)output0 + out_linesize);
for (x = 0; x < width_d2; x++) {
uint32_t out;
@@ -237,14 +234,13 @@ void decompress_420(
}
}
void decompress_nv12(
const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_linesize)
void decompress_nv12(const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t start_y, uint32_t end_y, uint8_t *output,
uint32_t out_linesize)
{
uint32_t start_y_d2 = start_y/2;
uint32_t width_d2 = min_uint32(in_linesize[0], out_linesize)/2;
uint32_t height_d2 = end_y/2;
uint32_t start_y_d2 = start_y / 2;
uint32_t width_d2 = min_uint32(in_linesize[0], out_linesize) / 2;
uint32_t height_d2 = end_y / 2;
uint32_t y;
for (y = start_y_d2; y < height_d2; y++) {
@@ -253,11 +249,11 @@ void decompress_nv12(
register uint32_t *output0, *output1;
uint32_t x;
chroma = (const uint16_t*)(input[1] + y * in_linesize[1]);
chroma = (const uint16_t *)(input[1] + y * in_linesize[1]);
lum0 = input[0] + y * 2 * in_linesize[0];
lum1 = lum0 + in_linesize[0];
output0 = (uint32_t*)(output + y * 2 * out_linesize);
output1 = (uint32_t*)((uint8_t*)output0 + out_linesize);
output0 = (uint32_t *)(output + y * 2 * out_linesize);
output1 = (uint32_t *)((uint8_t *)output0 + out_linesize);
for (x = 0; x < width_d2; x++) {
uint32_t out = *(chroma++) << 8;
@@ -271,31 +267,29 @@ void decompress_nv12(
}
}
void decompress_422(
const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_linesize,
bool leading_lum)
void decompress_422(const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y, uint8_t *output,
uint32_t out_linesize, bool leading_lum)
{
uint32_t width_d2 = min_uint32(in_linesize, out_linesize)/2;
uint32_t width_d2 = min_uint32(in_linesize, out_linesize) / 2;
uint32_t y;
register const uint32_t *input32;
register const uint32_t *input32_end;
register uint32_t *output32;
register uint32_t *output32;
if (leading_lum) {
for (y = start_y; y < end_y; y++) {
input32 = (const uint32_t*)(input + y*in_linesize);
input32 = (const uint32_t *)(input + y * in_linesize);
input32_end = input32 + width_d2;
output32 = (uint32_t*)(output + y*out_linesize);
output32 = (uint32_t *)(output + y * out_linesize);
while(input32 < input32_end) {
while (input32 < input32_end) {
register uint32_t dw = *input32;
output32[0] = dw;
dw &= 0xFFFFFF00;
dw |= (uint8_t)(dw>>16);
dw |= (uint8_t)(dw >> 16);
output32[1] = dw;
output32 += 2;
@@ -304,16 +298,16 @@ void decompress_422(
}
} else {
for (y = start_y; y < end_y; y++) {
input32 = (const uint32_t*)(input + y*in_linesize);
input32 = (const uint32_t *)(input + y * in_linesize);
input32_end = input32 + width_d2;
output32 = (uint32_t*)(output + y*out_linesize);
output32 = (uint32_t *)(output + y * out_linesize);
while (input32 < input32_end) {
register uint32_t dw = *input32;
output32[0] = dw;
dw &= 0xFFFF00FF;
dw |= (dw>>16) & 0xFF00;
dw |= (dw >> 16) & 0xFF00;
output32[1] = dw;
output32 += 2;


@@ -27,36 +27,34 @@ extern "C" {
* Functions for converting to and from packed 444 YUV
*/
EXPORT void compress_uyvx_to_i420(
const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_linesize[]);
EXPORT void compress_uyvx_to_i420(const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output[],
const uint32_t out_linesize[]);
EXPORT void compress_uyvx_to_nv12(
const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_linesize[]);
EXPORT void compress_uyvx_to_nv12(const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output[],
const uint32_t out_linesize[]);
EXPORT void convert_uyvx_to_i444(
const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_linesize[]);
EXPORT void convert_uyvx_to_i444(const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output[],
const uint32_t out_linesize[]);
EXPORT void decompress_nv12(
const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_linesize);
EXPORT void decompress_nv12(const uint8_t *const input[],
const uint32_t in_linesize[], uint32_t start_y,
uint32_t end_y, uint8_t *output,
uint32_t out_linesize);
EXPORT void decompress_420(
const uint8_t *const input[], const uint32_t in_linesize[],
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_linesize);
EXPORT void decompress_420(const uint8_t *const input[],
const uint32_t in_linesize[], uint32_t start_y,
uint32_t end_y, uint8_t *output,
uint32_t out_linesize);
EXPORT void decompress_422(
const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_linesize,
bool leading_lum);
EXPORT void decompress_422(const uint8_t *input, uint32_t in_linesize,
uint32_t start_y, uint32_t end_y, uint8_t *output,
uint32_t out_linesize, bool leading_lum);
#ifdef __cplusplus
}


@@ -9,20 +9,20 @@ struct media_frames_per_second {
uint32_t denominator;
};
static inline double media_frames_per_second_to_frame_interval(
struct media_frames_per_second fps)
static inline double
media_frames_per_second_to_frame_interval(struct media_frames_per_second fps)
{
return (double)fps.denominator / fps.numerator;
}
static inline double media_frames_per_second_to_fps(
struct media_frames_per_second fps)
static inline double
media_frames_per_second_to_fps(struct media_frames_per_second fps)
{
return (double)fps.numerator / fps.denominator;
}
static inline bool media_frames_per_second_is_valid(
struct media_frames_per_second fps)
static inline bool
media_frames_per_second_is_valid(struct media_frames_per_second fps)
{
return fps.numerator && fps.denominator;
}


@@ -54,7 +54,7 @@ static inline bool init_input(media_remux_job_t job, const char *in_filename)
int ret = avformat_open_input(&job->ifmt_ctx, in_filename, NULL, NULL);
if (ret < 0) {
blog(LOG_ERROR, "media_remux: Could not open input file '%s'",
in_filename);
in_filename);
return false;
}
@@ -76,16 +76,16 @@ static inline bool init_output(media_remux_job_t job, const char *out_filename)
int ret;
avformat_alloc_output_context2(&job->ofmt_ctx, NULL, NULL,
out_filename);
out_filename);
if (!job->ofmt_ctx) {
blog(LOG_ERROR, "media_remux: Could not create output context");
return false;
}
for (unsigned i = 0; i < job->ifmt_ctx->nb_streams; i++) {
AVStream *in_stream = job->ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(job->ofmt_ctx,
in_stream->codec->codec);
AVStream *in_stream = job->ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(
job->ofmt_ctx, in_stream->codec->codec);
if (!out_stream) {
blog(LOG_ERROR, "media_remux: Failed to allocate output"
" stream");
@@ -97,7 +97,7 @@ static inline bool init_output(media_remux_job_t job, const char *out_filename)
ret = avcodec_parameters_from_context(par, in_stream->codec);
if (ret == 0)
ret = avcodec_parameters_to_context(out_stream->codec,
par);
par);
avcodec_parameters_free(&par);
#else
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
@@ -124,8 +124,10 @@ static inline bool init_output(media_remux_job_t job, const char *out_filename)
ret = avio_open(&job->ofmt_ctx->pb, out_filename,
AVIO_FLAG_WRITE);
if (ret < 0) {
blog(LOG_ERROR, "media_remux: Failed to open output"
" file '%s'", out_filename);
blog(LOG_ERROR,
"media_remux: Failed to open output"
" file '%s'",
out_filename);
return false;
}
}
@@ -134,7 +136,7 @@ static inline bool init_output(media_remux_job_t job, const char *out_filename)
}
bool media_remux_job_create(media_remux_job_t *job, const char *in_filename,
const char *out_filename)
const char *out_filename)
{
if (!job)
return false;
@@ -167,23 +169,23 @@ fail:
return false;
}
static inline void process_packet(AVPacket *pkt,
AVStream *in_stream, AVStream *out_stream)
static inline void process_packet(AVPacket *pkt, AVStream *in_stream,
AVStream *out_stream)
{
pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base,
out_stream->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
out_stream->time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base,
out_stream->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt->duration = (int)av_rescale_q(pkt->duration,
in_stream->time_base, out_stream->time_base);
out_stream->time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
pkt->duration = (int)av_rescale_q(pkt->duration, in_stream->time_base,
out_stream->time_base);
pkt->pos = -1;
}
static inline int process_packets(media_remux_job_t job,
media_remux_progress_callback callback, void *data)
media_remux_progress_callback callback,
void *data)
{
AVPacket pkt;
@@ -192,9 +194,10 @@ static inline int process_packets(media_remux_job_t job,
ret = av_read_frame(job->ifmt_ctx, &pkt);
if (ret < 0) {
if (ret != AVERROR_EOF)
blog(LOG_ERROR, "media_remux: Error reading"
" packet: %s",
av_err2str(ret));
blog(LOG_ERROR,
"media_remux: Error reading"
" packet: %s",
av_err2str(ret));
break;
}
@@ -206,14 +209,14 @@ static inline int process_packets(media_remux_job_t job,
}
process_packet(&pkt, job->ifmt_ctx->streams[pkt.stream_index],
job->ofmt_ctx->streams[pkt.stream_index]);
job->ofmt_ctx->streams[pkt.stream_index]);
ret = av_interleaved_write_frame(job->ofmt_ctx, &pkt);
av_packet_unref(&pkt);
if (ret < 0) {
blog(LOG_ERROR, "media_remux: Error muxing packet: %s",
av_err2str(ret));
av_err2str(ret));
break;
}
}
@@ -222,7 +225,7 @@ static inline int process_packets(media_remux_job_t job,
}
bool media_remux_job_process(media_remux_job_t job,
media_remux_progress_callback callback, void *data)
media_remux_progress_callback callback, void *data)
{
int ret;
bool success = false;
@@ -233,7 +236,7 @@ bool media_remux_job_process(media_remux_job_t job,
ret = avformat_write_header(job->ofmt_ctx, NULL);
if (ret < 0) {
blog(LOG_ERROR, "media_remux: Error opening output file: %s",
av_err2str(ret));
av_err2str(ret));
return success;
}
@@ -246,7 +249,7 @@ bool media_remux_job_process(media_remux_job_t job,
ret = av_write_trailer(job->ofmt_ctx);
if (ret < 0) {
blog(LOG_ERROR, "media_remux: av_write_trailer: %s",
av_err2str(ret));
av_err2str(ret));
success = false;
}


@@ -22,16 +22,18 @@
struct media_remux_job;
typedef struct media_remux_job *media_remux_job_t;
typedef bool (media_remux_progress_callback)(void *data, float percent);
typedef bool(media_remux_progress_callback)(void *data, float percent);
#ifdef __cplusplus
extern "C" {
#endif
EXPORT bool media_remux_job_create(media_remux_job_t *job,
const char *in_filename, const char *out_filename);
const char *in_filename,
const char *out_filename);
EXPORT bool media_remux_job_process(media_remux_job_t job,
media_remux_progress_callback callback, void *data);
media_remux_progress_callback callback,
void *data);
EXPORT void media_remux_job_destroy(media_remux_job_t job);
#ifdef __cplusplus


@@ -24,31 +24,29 @@
enum video_format video_format_from_fourcc(uint32_t fourcc)
{
switch (fourcc) {
case MAKE_FOURCC('U','Y','V','Y'):
case MAKE_FOURCC('H','D','Y','C'):
case MAKE_FOURCC('U','Y','N','V'):
case MAKE_FOURCC('U','Y','N','Y'):
case MAKE_FOURCC('u','y','v','1'):
case MAKE_FOURCC('2','v','u','y'):
case MAKE_FOURCC('2','V','u','y'):
return VIDEO_FORMAT_UYVY;
case MAKE_FOURCC('U', 'Y', 'V', 'Y'):
case MAKE_FOURCC('H', 'D', 'Y', 'C'):
case MAKE_FOURCC('U', 'Y', 'N', 'V'):
case MAKE_FOURCC('U', 'Y', 'N', 'Y'):
case MAKE_FOURCC('u', 'y', 'v', '1'):
case MAKE_FOURCC('2', 'v', 'u', 'y'):
case MAKE_FOURCC('2', 'V', 'u', 'y'):
return VIDEO_FORMAT_UYVY;
case MAKE_FOURCC('Y','U','Y','2'):
case MAKE_FOURCC('Y','4','2','2'):
case MAKE_FOURCC('V','4','2','2'):
case MAKE_FOURCC('V','Y','U','Y'):
case MAKE_FOURCC('Y','U','N','V'):
case MAKE_FOURCC('y','u','v','2'):
case MAKE_FOURCC('y','u','v','s'):
return VIDEO_FORMAT_YUY2;
case MAKE_FOURCC('Y', 'U', 'Y', '2'):
case MAKE_FOURCC('Y', '4', '2', '2'):
case MAKE_FOURCC('V', '4', '2', '2'):
case MAKE_FOURCC('V', 'Y', 'U', 'Y'):
case MAKE_FOURCC('Y', 'U', 'N', 'V'):
case MAKE_FOURCC('y', 'u', 'v', '2'):
case MAKE_FOURCC('y', 'u', 'v', 's'):
return VIDEO_FORMAT_YUY2;
case MAKE_FOURCC('Y','V','Y','U'):
return VIDEO_FORMAT_YVYU;
case MAKE_FOURCC('Y', 'V', 'Y', 'U'):
return VIDEO_FORMAT_YVYU;
case MAKE_FOURCC('Y','8','0','0'):
return VIDEO_FORMAT_Y800;
case MAKE_FOURCC('Y', '8', '0', '0'):
return VIDEO_FORMAT_Y800;
}
return VIDEO_FORMAT_NONE;
}


@@ -17,18 +17,18 @@
#include "video-frame.h"
#define ALIGN_SIZE(size, align) \
size = (((size)+(align-1)) & (~(align-1)))
#define ALIGN_SIZE(size, align) size = (((size) + (align - 1)) & (~(align - 1)))
/* messy code alarm */
void video_frame_init(struct video_frame *frame, enum video_format format,
uint32_t width, uint32_t height)
uint32_t width, uint32_t height)
{
size_t size;
size_t offsets[MAX_AV_PLANES];
int alignment = base_get_alignment();
int alignment = base_get_alignment();
if (!frame) return;
if (!frame)
return;
memset(frame, 0, sizeof(struct video_frame));
memset(offsets, 0, sizeof(offsets));
@@ -41,27 +41,27 @@ void video_frame_init(struct video_frame *frame, enum video_format format,
size = width * height;
ALIGN_SIZE(size, alignment);
offsets[0] = size;
size += (width/2) * (height/2);
size += (width / 2) * (height / 2);
ALIGN_SIZE(size, alignment);
offsets[1] = size;
size += (width/2) * (height/2);
size += (width / 2) * (height / 2);
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size);
frame->data[1] = (uint8_t*)frame->data[0] + offsets[0];
frame->data[2] = (uint8_t*)frame->data[0] + offsets[1];
frame->data[1] = (uint8_t *)frame->data[0] + offsets[0];
frame->data[2] = (uint8_t *)frame->data[0] + offsets[1];
frame->linesize[0] = width;
frame->linesize[1] = width/2;
frame->linesize[2] = width/2;
frame->linesize[1] = width / 2;
frame->linesize[2] = width / 2;
break;
case VIDEO_FORMAT_NV12:
size = width * height;
ALIGN_SIZE(size, alignment);
offsets[0] = size;
size += (width/2) * (height/2) * 2;
size += (width / 2) * (height / 2) * 2;
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size);
frame->data[1] = (uint8_t*)frame->data[0] + offsets[0];
frame->data[1] = (uint8_t *)frame->data[0] + offsets[0];
frame->linesize[0] = width;
frame->linesize[1] = width;
break;
@@ -79,7 +79,7 @@ void video_frame_init(struct video_frame *frame, enum video_format format,
size = width * height * 2;
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size);
frame->linesize[0] = width*2;
frame->linesize[0] = width * 2;
break;
case VIDEO_FORMAT_RGBA:
@@ -88,15 +88,15 @@ void video_frame_init(struct video_frame *frame, enum video_format format,
size = width * height * 4;
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size);
frame->linesize[0] = width*4;
frame->linesize[0] = width * 4;
break;
case VIDEO_FORMAT_I444:
size = width * height;
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size * 3);
frame->data[1] = (uint8_t*)frame->data[0] + size;
frame->data[2] = (uint8_t*)frame->data[1] + size;
frame->data[1] = (uint8_t *)frame->data[0] + size;
frame->data[2] = (uint8_t *)frame->data[1] + size;
frame->linesize[0] = width;
frame->linesize[1] = width;
frame->linesize[2] = width;
@@ -106,13 +106,13 @@ void video_frame_init(struct video_frame *frame, enum video_format format,
size = width * height * 3;
ALIGN_SIZE(size, alignment);
frame->data[0] = bmalloc(size);
frame->linesize[0] = width*3;
frame->linesize[0] = width * 3;
break;
}
}
void video_frame_copy(struct video_frame *dst, const struct video_frame *src,
enum video_format format, uint32_t cy)
enum video_format format, uint32_t cy)
{
switch (format) {
case VIDEO_FORMAT_NONE:


@@ -21,12 +21,13 @@
#include "video-io.h"
struct video_frame {
uint8_t *data[MAX_AV_PLANES];
uint8_t *data[MAX_AV_PLANES];
uint32_t linesize[MAX_AV_PLANES];
};
EXPORT void video_frame_init(struct video_frame *frame,
enum video_format format, uint32_t width, uint32_t height);
enum video_format format, uint32_t width,
uint32_t height);
static inline void video_frame_free(struct video_frame *frame)
{
@@ -36,12 +37,12 @@ static inline void video_frame_free(struct video_frame *frame)
}
}
static inline struct video_frame *video_frame_create(
enum video_format format, uint32_t width, uint32_t height)
static inline struct video_frame *
video_frame_create(enum video_format format, uint32_t width, uint32_t height)
{
struct video_frame *frame;
frame = (struct video_frame*)bzalloc(sizeof(struct video_frame));
frame = (struct video_frame *)bzalloc(sizeof(struct video_frame));
video_frame_init(frame, format, width, height);
return frame;
}
@@ -55,5 +56,5 @@ static inline void video_frame_destroy(struct video_frame *frame)
}
EXPORT void video_frame_copy(struct video_frame *dst,
const struct video_frame *src, enum video_format format,
uint32_t height);
const struct video_frame *src,
enum video_format format, uint32_t height);


@@ -40,10 +40,10 @@ struct cached_frame_info {
};
struct video_input {
struct video_scale_info conversion;
video_scaler_t *scaler;
struct video_frame frame[MAX_CONVERT_BUFFERS];
int cur_frame;
struct video_scale_info conversion;
video_scaler_t *scaler;
struct video_frame frame[MAX_CONVERT_BUFFERS];
int cur_frame;
void (*callback)(void *param, struct video_data *frame);
void *param;
@@ -57,35 +57,35 @@ static inline void video_input_free(struct video_input *input)
}
struct video_output {
struct video_output_info info;
struct video_output_info info;
pthread_t thread;
pthread_mutex_t data_mutex;
bool stop;
pthread_t thread;
pthread_mutex_t data_mutex;
bool stop;
os_sem_t *update_semaphore;
uint64_t frame_time;
volatile long skipped_frames;
volatile long total_frames;
os_sem_t *update_semaphore;
uint64_t frame_time;
volatile long skipped_frames;
volatile long total_frames;
bool initialized;
bool initialized;
pthread_mutex_t input_mutex;
pthread_mutex_t input_mutex;
DARRAY(struct video_input) inputs;
size_t available_frames;
size_t first_added;
size_t last_added;
struct cached_frame_info cache[MAX_CACHE_SIZE];
size_t available_frames;
size_t first_added;
size_t last_added;
struct cached_frame_info cache[MAX_CACHE_SIZE];
volatile bool raw_active;
volatile long gpu_refs;
volatile bool raw_active;
volatile long gpu_refs;
};
/* ------------------------------------------------------------------------- */
static inline bool scale_video_output(struct video_input *input,
struct video_data *data)
struct video_data *data)
{
bool success = true;
@@ -97,14 +97,14 @@ static inline bool scale_video_output(struct video_input *input,
frame = &input->frame[input->cur_frame];
success = video_scaler_scale(input->scaler,
frame->data, frame->linesize,
(const uint8_t * const*)data->data,
data->linesize);
success = video_scaler_scale(input->scaler, frame->data,
frame->linesize,
(const uint8_t *const *)data->data,
data->linesize);
if (success) {
for (size_t i = 0; i < MAX_AV_PLANES; i++) {
data->data[i] = frame->data[i];
data->data[i] = frame->data[i];
data->linesize[i] = frame->linesize[i];
}
} else {
@@ -134,7 +134,7 @@ static inline bool video_output_cur_frame(struct video_output *video)
pthread_mutex_lock(&video->input_mutex);
for (size_t i = 0; i < video->inputs.num; i++) {
struct video_input *input = video->inputs.array+i;
struct video_input *input = video->inputs.array + i;
struct video_data frame = frame_info->frame;
if (scale_video_output(input, &frame))
@@ -177,7 +177,7 @@ static void *video_thread(void *param)
const char *video_thread_name =
profile_store_name(obs_get_profiler_name_store(),
"video_thread(%s)", video->info.name);
"video_thread(%s)", video->info.name);
while (os_sem_wait(video->update_semaphore) == 0) {
if (video->stop)
@@ -212,10 +212,10 @@ static inline void init_cache(struct video_output *video)
for (size_t i = 0; i < video->info.cache_size; i++) {
struct video_frame *frame;
frame = (struct video_frame*)&video->cache[i];
frame = (struct video_frame *)&video->cache[i];
video_frame_init(frame, video->info.format,
video->info.width, video->info.height);
video_frame_init(frame, video->info.format, video->info.width,
video->info.height);
}
video->available_frames = video->info.cache_size;
@@ -235,7 +235,7 @@ int video_output_open(video_t **video, struct video_output_info *info)
memcpy(&out->info, info, sizeof(struct video_output_info));
out->frame_time = (uint64_t)(1000000000.0 * (double)info->fps_den /
(double)info->fps_num);
(double)info->fps_num);
out->initialized = false;
if (pthread_mutexattr_init(&attr) != 0)
@@ -274,7 +274,7 @@ void video_output_close(video_t *video)
da_free(video->inputs);
for (size_t i = 0; i < video->info.cache_size; i++)
video_frame_free((struct video_frame*)&video->cache[i]);
video_frame_free((struct video_frame *)&video->cache[i]);
os_sem_destroy(video->update_semaphore);
pthread_mutex_destroy(&video->data_mutex);
@@ -283,11 +283,12 @@ void video_output_close(video_t *video)
}
static size_t video_get_input_idx(const video_t *video,
void (*callback)(void *param, struct video_data *frame),
void *param)
void (*callback)(void *param,
struct video_data *frame),
void *param)
{
for (size_t i = 0; i < video->inputs.num; i++) {
struct video_input *input = video->inputs.array+i;
struct video_input *input = video->inputs.array + i;
if (input->callback == callback && input->param == param)
return i;
}
@@ -296,38 +297,37 @@ static size_t video_get_input_idx(const video_t *video,
}
static inline bool video_input_init(struct video_input *input,
struct video_output *video)
struct video_output *video)
{
if (input->conversion.width != video->info.width ||
if (input->conversion.width != video->info.width ||
input->conversion.height != video->info.height ||
input->conversion.format != video->info.format) {
struct video_scale_info from = {
.format = video->info.format,
.width = video->info.width,
.height = video->info.height,
.range = video->info.range,
.colorspace = video->info.colorspace
};
struct video_scale_info from = {.format = video->info.format,
.width = video->info.width,
.height = video->info.height,
.range = video->info.range,
.colorspace =
video->info.colorspace};
int ret = video_scaler_create(&input->scaler,
&input->conversion, &from,
VIDEO_SCALE_FAST_BILINEAR);
&input->conversion, &from,
VIDEO_SCALE_FAST_BILINEAR);
if (ret != VIDEO_SCALER_SUCCESS) {
if (ret == VIDEO_SCALER_BAD_CONVERSION)
blog(LOG_ERROR, "video_input_init: Bad "
"scale conversion type");
"scale conversion type");
else
blog(LOG_ERROR, "video_input_init: Failed to "
"create scaler");
"create scaler");
return false;
}
for (size_t i = 0; i < MAX_CONVERT_BUFFERS; i++)
video_frame_init(&input->frame[i],
input->conversion.format,
input->conversion.width,
input->conversion.height);
input->conversion.format,
input->conversion.width,
input->conversion.height);
}
return true;
@@ -339,10 +339,9 @@ static inline void reset_frames(video_t *video)
os_atomic_set_long(&video->total_frames, 0);
}
bool video_output_connect(video_t *video,
const struct video_scale_info *conversion,
void (*callback)(void *param, struct video_data *frame),
void *param)
bool video_output_connect(
video_t *video, const struct video_scale_info *conversion,
void (*callback)(void *param, struct video_data *frame), void *param)
{
bool success = false;
@@ -356,14 +355,14 @@ bool video_output_connect(video_t *video,
memset(&input, 0, sizeof(input));
input.callback = callback;
input.param = param;
input.param = param;
if (conversion) {
input.conversion = *conversion;
} else {
input.conversion.format = video->info.format;
input.conversion.width = video->info.width;
input.conversion.height = video->info.height;
input.conversion.format = video->info.format;
input.conversion.width = video->info.width;
input.conversion.height = video->info.height;
}
if (input.conversion.width == 0)
@@ -393,22 +392,22 @@ static void log_skipped(video_t *video)
long skipped = os_atomic_load_long(&video->skipped_frames);
double percentage_skipped =
(double)skipped /
(double)os_atomic_load_long(&video->total_frames) *
100.0;
(double)os_atomic_load_long(&video->total_frames) * 100.0;
if (skipped)
blog(LOG_INFO, "Video stopped, number of "
"skipped frames due "
"to encoding lag: "
"%ld/%ld (%0.1f%%)",
video->skipped_frames,
video->total_frames,
percentage_skipped);
blog(LOG_INFO,
"Video stopped, number of "
"skipped frames due "
"to encoding lag: "
"%ld/%ld (%0.1f%%)",
video->skipped_frames, video->total_frames,
percentage_skipped);
}
void video_output_disconnect(video_t *video,
void (*callback)(void *param, struct video_data *frame),
void *param)
void (*callback)(void *param,
struct video_data *frame),
void *param)
{
if (!video || !callback)
return;
@@ -417,7 +416,7 @@ void video_output_disconnect(video_t *video,
size_t idx = video_get_input_idx(video, callback, param);
if (idx != DARRAY_INVALID) {
video_input_free(video->inputs.array+idx);
video_input_free(video->inputs.array + idx);
da_erase(video->inputs, idx);
if (video->inputs.num == 0) {
@@ -433,7 +432,8 @@ void video_output_disconnect(video_t *video,
bool video_output_active(const video_t *video)
{
if (!video) return false;
if (!video)
return false;
return os_atomic_load_bool(&video->raw_active);
}
@@ -443,12 +443,13 @@ const struct video_output_info *video_output_get_info(const video_t *video)
}
bool video_output_lock_frame(video_t *video, struct video_frame *frame,
int count, uint64_t timestamp)
int count, uint64_t timestamp)
{
struct cached_frame_info *cfi;
bool locked;
if (!video) return false;
if (!video)
return false;
pthread_mutex_lock(&video->data_mutex);
@@ -480,7 +481,8 @@ bool video_output_lock_frame(video_t *video, struct video_frame *frame,
void video_output_unlock_frame(video_t *video)
{
if (!video) return;
if (!video)
return;
pthread_mutex_lock(&video->data_mutex);


@@ -68,20 +68,20 @@ enum video_range_type {
};
struct video_data {
uint8_t *data[MAX_AV_PLANES];
uint32_t linesize[MAX_AV_PLANES];
uint64_t timestamp;
uint8_t *data[MAX_AV_PLANES];
uint32_t linesize[MAX_AV_PLANES];
uint64_t timestamp;
};
struct video_output_info {
const char *name;
const char *name;
enum video_format format;
uint32_t fps_num;
uint32_t fps_den;
uint32_t width;
uint32_t height;
size_t cache_size;
uint32_t fps_num;
uint32_t fps_den;
uint32_t width;
uint32_t height;
size_t cache_size;
enum video_colorspace colorspace;
enum video_range_type range;
@@ -112,17 +112,28 @@ static inline bool format_is_yuv(enum video_format format)
static inline const char *get_video_format_name(enum video_format format)
{
switch (format) {
case VIDEO_FORMAT_I420: return "I420";
case VIDEO_FORMAT_NV12: return "NV12";
case VIDEO_FORMAT_YVYU: return "YVYU";
case VIDEO_FORMAT_YUY2: return "YUY2";
case VIDEO_FORMAT_UYVY: return "UYVY";
case VIDEO_FORMAT_RGBA: return "RGBA";
case VIDEO_FORMAT_BGRA: return "BGRA";
case VIDEO_FORMAT_BGRX: return "BGRX";
case VIDEO_FORMAT_I444: return "I444";
case VIDEO_FORMAT_Y800: return "Y800";
case VIDEO_FORMAT_BGR3: return "BGR3";
case VIDEO_FORMAT_I420:
return "I420";
case VIDEO_FORMAT_NV12:
return "NV12";
case VIDEO_FORMAT_YVYU:
return "YVYU";
case VIDEO_FORMAT_YUY2:
return "YUY2";
case VIDEO_FORMAT_UYVY:
return "UYVY";
case VIDEO_FORMAT_RGBA:
return "RGBA";
case VIDEO_FORMAT_BGRA:
return "BGRA";
case VIDEO_FORMAT_BGRX:
return "BGRX";
case VIDEO_FORMAT_I444:
return "I444";
case VIDEO_FORMAT_Y800:
return "Y800";
case VIDEO_FORMAT_BGR3:
return "BGR3";
case VIDEO_FORMAT_NONE:;
}
@@ -132,7 +143,8 @@ static inline const char *get_video_format_name(enum video_format format)
static inline const char *get_video_colorspace_name(enum video_colorspace cs)
{
switch (cs) {
case VIDEO_CS_709: return "709";
case VIDEO_CS_709:
return "709";
case VIDEO_CS_601:
case VIDEO_CS_DEFAULT:;
}
@@ -140,20 +152,19 @@ static inline const char *get_video_colorspace_name(enum video_colorspace cs)
return "601";
}
static inline enum video_range_type resolve_video_range(
enum video_format format, enum video_range_type range)
static inline enum video_range_type
resolve_video_range(enum video_format format, enum video_range_type range)
{
if (range == VIDEO_RANGE_DEFAULT) {
range = format_is_yuv(format)
? VIDEO_RANGE_PARTIAL
: VIDEO_RANGE_FULL;
range = format_is_yuv(format) ? VIDEO_RANGE_PARTIAL
: VIDEO_RANGE_FULL;
}
return range;
}
static inline const char *get_video_range_name(enum video_format format,
enum video_range_type range)
enum video_range_type range)
{
range = resolve_video_range(format, range);
return range == VIDEO_RANGE_FULL ? "Full" : "Partial";
@@ -168,9 +179,9 @@ enum video_scale_type {
};
struct video_scale_info {
enum video_format format;
uint32_t width;
uint32_t height;
enum video_format format;
uint32_t width;
uint32_t height;
enum video_range_type range;
enum video_colorspace colorspace;
};
@@ -178,30 +189,32 @@ struct video_scale_info {
EXPORT enum video_format video_format_from_fourcc(uint32_t fourcc);
EXPORT bool video_format_get_parameters(enum video_colorspace color_space,
enum video_range_type range, float matrix[16],
float min_range[3], float max_range[3]);
enum video_range_type range,
float matrix[16], float min_range[3],
float max_range[3]);
#define VIDEO_OUTPUT_SUCCESS 0
#define VIDEO_OUTPUT_SUCCESS 0
#define VIDEO_OUTPUT_INVALIDPARAM -1
#define VIDEO_OUTPUT_FAIL -2
#define VIDEO_OUTPUT_FAIL -2
EXPORT int video_output_open(video_t **video, struct video_output_info *info);
EXPORT void video_output_close(video_t *video);
EXPORT bool video_output_connect(video_t *video,
const struct video_scale_info *conversion,
void (*callback)(void *param, struct video_data *frame),
void *param);
EXPORT bool
video_output_connect(video_t *video, const struct video_scale_info *conversion,
void (*callback)(void *param, struct video_data *frame),
void *param);
EXPORT void video_output_disconnect(video_t *video,
void (*callback)(void *param, struct video_data *frame),
void *param);
void (*callback)(void *param,
struct video_data *frame),
void *param);
EXPORT bool video_output_active(const video_t *video);
EXPORT const struct video_output_info *video_output_get_info(
const video_t *video);
EXPORT const struct video_output_info *
video_output_get_info(const video_t *video);
EXPORT bool video_output_lock_frame(video_t *video, struct video_frame *frame,
int count, uint64_t timestamp);
int count, uint64_t timestamp);
EXPORT void video_output_unlock_frame(video_t *video);
EXPORT uint64_t video_output_get_frame_time(const video_t *video);
EXPORT void video_output_stop(video_t *video);
@@ -220,7 +233,6 @@ extern void video_output_dec_texture_encoders(video_t *video);
extern void video_output_inc_texture_frames(video_t *video);
extern void video_output_inc_texture_skipped_frames(video_t *video);
#ifdef __cplusplus
}
#endif


@@ -37,69 +37,59 @@ static struct {
} format_info[] = {
{VIDEO_CS_601,
0.114f, 0.299f, {16, 16, 16}, {235, 240, 240},
{{16, 128, 128}, {0, 128, 128}},
0.114f,
0.299f,
{16, 16, 16},
{235, 240, 240},
{{16, 128, 128}, {0, 128, 128}},
#ifndef COMPUTE_MATRICES
{ 16.0f/255.0f, 16.0f/255.0f, 16.0f/255.0f},
{235.0f/255.0f, 240.0f/255.0f, 240.0f/255.0f},
{
{
1.164384f, 0.000000f, 1.596027f, -0.874202f,
1.164384f, -0.391762f, -0.812968f, 0.531668f,
1.164384f, 2.017232f, 0.000000f, -1.085631f,
0.000000f, 0.000000f, 0.000000f, 1.000000f
},
{
1.000000f, 0.000000f, 1.407520f, -0.706520f,
1.000000f, -0.345491f, -0.716948f, 0.533303f,
1.000000f, 1.778976f, 0.000000f, -0.892976f,
0.000000f, 0.000000f, 0.000000f, 1.000000f
}
}
{16.0f / 255.0f, 16.0f / 255.0f, 16.0f / 255.0f},
{235.0f / 255.0f, 240.0f / 255.0f, 240.0f / 255.0f},
{{1.164384f, 0.000000f, 1.596027f, -0.874202f, 1.164384f, -0.391762f,
-0.812968f, 0.531668f, 1.164384f, 2.017232f, 0.000000f, -1.085631f,
0.000000f, 0.000000f, 0.000000f, 1.000000f},
{1.000000f, 0.000000f, 1.407520f, -0.706520f, 1.000000f, -0.345491f,
-0.716948f, 0.533303f, 1.000000f, 1.778976f, 0.000000f, -0.892976f,
0.000000f, 0.000000f, 0.000000f, 1.000000f}}
#endif
},
{VIDEO_CS_709,
0.0722f, 0.2126f, {16, 16, 16}, {235, 240, 240},
{{16, 128, 128}, {0, 128, 128}},
0.0722f,
0.2126f,
{16, 16, 16},
{235, 240, 240},
{{16, 128, 128}, {0, 128, 128}},
#ifndef COMPUTE_MATRICES
{ 16.0f/255.0f, 16.0f/255.0f, 16.0f/255.0f},
{235.0f/255.0f, 240.0f/255.0f, 240.0f/255.0f},
{
{
1.164384f, 0.000000f, 1.792741f, -0.972945f,
1.164384f, -0.213249f, -0.532909f, 0.301483f,
1.164384f, 2.112402f, 0.000000f, -1.133402f,
0.000000f, 0.000000f, 0.000000f, 1.000000f
},
{
1.000000f, 0.000000f, 1.581000f, -0.793600f,
1.000000f, -0.188062f, -0.469967f, 0.330305f,
1.000000f, 1.862906f, 0.000000f, -0.935106f,
0.000000f, 0.000000f, 0.000000f, 1.000000f
}
}
{16.0f / 255.0f, 16.0f / 255.0f, 16.0f / 255.0f},
{235.0f / 255.0f, 240.0f / 255.0f, 240.0f / 255.0f},
{{1.164384f, 0.000000f, 1.792741f, -0.972945f, 1.164384f, -0.213249f,
-0.532909f, 0.301483f, 1.164384f, 2.112402f, 0.000000f, -1.133402f,
0.000000f, 0.000000f, 0.000000f, 1.000000f},
{1.000000f, 0.000000f, 1.581000f, -0.793600f, 1.000000f, -0.188062f,
-0.469967f, 0.330305f, 1.000000f, 1.862906f, 0.000000f, -0.935106f,
0.000000f, 0.000000f, 0.000000f, 1.000000f}}
#endif
},
};
#define NUM_FORMATS (sizeof(format_info)/sizeof(format_info[0]))
#define NUM_FORMATS (sizeof(format_info) / sizeof(format_info[0]))
#ifdef COMPUTE_MATRICES
static void log_matrix(float const matrix[16])
{
blog(LOG_DEBUG, "\n% f, % f, % f, % f" \
"\n% f, % f, % f, % f" \
"\n% f, % f, % f, % f" \
"\n% f, % f, % f, % f",
matrix[ 0], matrix[ 1], matrix[ 2], matrix[ 3],
matrix[ 4], matrix[ 5], matrix[ 6], matrix[ 7],
matrix[ 8], matrix[ 9], matrix[10], matrix[11],
matrix[12], matrix[13], matrix[14], matrix[15]);
blog(LOG_DEBUG,
"\n% f, % f, % f, % f"
"\n% f, % f, % f, % f"
"\n% f, % f, % f, % f"
"\n% f, % f, % f, % f",
matrix[0], matrix[1], matrix[2], matrix[3], matrix[4], matrix[5],
matrix[6], matrix[7], matrix[8], matrix[9], matrix[10], matrix[11],
matrix[12], matrix[13], matrix[14], matrix[15]);
}
static void initialize_matrix(float const Kb, float const Kr,
int const range_min[3], int const range_max[3],
int const black_levels[3], float matrix[16])
int const range_min[3], int const range_max[3],
int const black_levels[3], float matrix[16])
{
struct matrix3 color_matrix;
@@ -107,35 +97,29 @@ static void initialize_matrix(float const Kb, float const Kr,
int uvals = (range_max[1] - range_min[1]) / 2;
int vvals = (range_max[2] - range_min[2]) / 2;
vec3_set(&color_matrix.x, 255./yvals,
0.,
255./vvals * (1. - Kr));
vec3_set(&color_matrix.y, 255./yvals,
255./uvals * (Kb - 1.) * Kb / (1. - Kb - Kr),
255./vvals * (Kr - 1.) * Kr / (1. - Kb - Kr));
vec3_set(&color_matrix.z, 255./yvals,
255./uvals * (1. - Kb),
0.);
vec3_set(&color_matrix.x, 255. / yvals, 0., 255. / vvals * (1. - Kr));
vec3_set(&color_matrix.y, 255. / yvals,
255. / uvals * (Kb - 1.) * Kb / (1. - Kb - Kr),
255. / vvals * (Kr - 1.) * Kr / (1. - Kb - Kr));
vec3_set(&color_matrix.z, 255. / yvals, 255. / uvals * (1. - Kb), 0.);
struct vec3 offsets, multiplied;
vec3_set(&offsets,
-black_levels[0]/255.,
-black_levels[1]/255.,
-black_levels[2]/255.);
vec3_set(&offsets, -black_levels[0] / 255., -black_levels[1] / 255.,
-black_levels[2] / 255.);
vec3_rotate(&multiplied, &offsets, &color_matrix);
matrix[ 0] = color_matrix.x.x;
matrix[ 1] = color_matrix.x.y;
matrix[ 2] = color_matrix.x.z;
matrix[ 3] = multiplied.x;
matrix[0] = color_matrix.x.x;
matrix[1] = color_matrix.x.y;
matrix[2] = color_matrix.x.z;
matrix[3] = multiplied.x;
matrix[ 4] = color_matrix.y.x;
matrix[ 5] = color_matrix.y.y;
matrix[ 6] = color_matrix.y.z;
matrix[ 7] = multiplied.y;
matrix[4] = color_matrix.y.x;
matrix[5] = color_matrix.y.y;
matrix[6] = color_matrix.y.z;
matrix[7] = multiplied.y;
matrix[ 8] = color_matrix.z.x;
matrix[ 9] = color_matrix.z.y;
matrix[8] = color_matrix.z.x;
matrix[9] = color_matrix.z.y;
matrix[10] = color_matrix.z.z;
matrix[11] = multiplied.z;
@@ -147,26 +131,26 @@ static void initialize_matrix(float const Kb, float const Kr,
static void initialize_matrices()
{
static int range_min[] = { 0, 0, 0};
static int range_min[] = {0, 0, 0};
static int range_max[] = {255, 255, 255};
for (size_t i = 0; i < NUM_FORMATS; i++) {
initialize_matrix(format_info[i].Kb, format_info[i].Kr,
range_min, range_max,
format_info[i].black_levels[1],
format_info[i].matrix[1]);
range_min, range_max,
format_info[i].black_levels[1],
format_info[i].matrix[1]);
initialize_matrix(format_info[i].Kb, format_info[i].Kr,
format_info[i].range_min,
format_info[i].range_max,
format_info[i].black_levels[0],
format_info[i].matrix[0]);
format_info[i].range_min,
format_info[i].range_max,
format_info[i].black_levels[0],
format_info[i].matrix[0]);
for (int j = 0; j < 3; j++) {
format_info[i].float_range_min[j] =
format_info[i].range_min[j]/255.;
format_info[i].range_min[j] / 255.;
format_info[i].float_range_max[j] =
format_info[i].range_max[j]/255.;
format_info[i].range_max[j] / 255.;
}
}
}
@@ -178,8 +162,8 @@ static const float full_min[3] = {0.0f, 0.0f, 0.0f};
static const float full_max[3] = {1.0f, 1.0f, 1.0f};
bool video_format_get_parameters(enum video_colorspace color_space,
enum video_range_type range, float matrix[16],
float range_min[3], float range_max[3])
enum video_range_type range, float matrix[16],
float range_min[3], float range_max[3])
{
#ifdef COMPUTE_MATRICES
if (!matrices_initialized) {
@@ -196,7 +180,7 @@ bool video_format_get_parameters(enum video_colorspace color_space,
int full_range = range == VIDEO_RANGE_FULL ? 1 : 0;
memcpy(matrix, format_info[i].matrix[full_range],
sizeof(float) * 16);
sizeof(float) * 16);
if (range == VIDEO_RANGE_FULL) {
if (range_min)
@@ -208,11 +192,11 @@ bool video_format_get_parameters(enum video_colorspace color_space,
if (range_min)
memcpy(range_min, format_info[i].float_range_min,
sizeof(float) * 3);
sizeof(float) * 3);
if (range_max)
memcpy(range_max, format_info[i].float_range_max,
sizeof(float) * 3);
sizeof(float) * 3);
return true;
}


@@ -25,22 +25,34 @@ struct video_scaler {
int src_height;
};
static inline enum AVPixelFormat get_ffmpeg_video_format(
enum video_format format)
static inline enum AVPixelFormat
get_ffmpeg_video_format(enum video_format format)
{
switch (format) {
case VIDEO_FORMAT_NONE: return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_I420: return AV_PIX_FMT_YUV420P;
case VIDEO_FORMAT_NV12: return AV_PIX_FMT_NV12;
case VIDEO_FORMAT_YVYU: return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_YUY2: return AV_PIX_FMT_YUYV422;
case VIDEO_FORMAT_UYVY: return AV_PIX_FMT_UYVY422;
case VIDEO_FORMAT_RGBA: return AV_PIX_FMT_RGBA;
case VIDEO_FORMAT_BGRA: return AV_PIX_FMT_BGRA;
case VIDEO_FORMAT_BGRX: return AV_PIX_FMT_BGRA;
case VIDEO_FORMAT_Y800: return AV_PIX_FMT_GRAY8;
case VIDEO_FORMAT_I444: return AV_PIX_FMT_YUV444P;
case VIDEO_FORMAT_BGR3: return AV_PIX_FMT_BGR24;
case VIDEO_FORMAT_NONE:
return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_I420:
return AV_PIX_FMT_YUV420P;
case VIDEO_FORMAT_NV12:
return AV_PIX_FMT_NV12;
case VIDEO_FORMAT_YVYU:
return AV_PIX_FMT_NONE;
case VIDEO_FORMAT_YUY2:
return AV_PIX_FMT_YUYV422;
case VIDEO_FORMAT_UYVY:
return AV_PIX_FMT_UYVY422;
case VIDEO_FORMAT_RGBA:
return AV_PIX_FMT_RGBA;
case VIDEO_FORMAT_BGRA:
return AV_PIX_FMT_BGRA;
case VIDEO_FORMAT_BGRX:
return AV_PIX_FMT_BGRA;
case VIDEO_FORMAT_Y800:
return AV_PIX_FMT_GRAY8;
case VIDEO_FORMAT_I444:
return AV_PIX_FMT_YUV444P;
case VIDEO_FORMAT_BGR3:
return AV_PIX_FMT_BGR24;
}
return AV_PIX_FMT_NONE;
@@ -49,11 +61,16 @@ static inline enum AVPixelFormat get_ffmpeg_video_format(
static inline int get_ffmpeg_scale_type(enum video_scale_type type)
{
switch (type) {
case VIDEO_SCALE_DEFAULT: return SWS_FAST_BILINEAR;
case VIDEO_SCALE_POINT: return SWS_POINT;
case VIDEO_SCALE_FAST_BILINEAR: return SWS_FAST_BILINEAR;
case VIDEO_SCALE_BILINEAR: return SWS_BILINEAR | SWS_AREA;
case VIDEO_SCALE_BICUBIC: return SWS_BICUBIC;
case VIDEO_SCALE_DEFAULT:
return SWS_FAST_BILINEAR;
case VIDEO_SCALE_POINT:
return SWS_POINT;
case VIDEO_SCALE_FAST_BILINEAR:
return SWS_FAST_BILINEAR;
case VIDEO_SCALE_BILINEAR:
return SWS_BILINEAR | SWS_AREA;
case VIDEO_SCALE_BICUBIC:
return SWS_BICUBIC;
}
return SWS_POINT;
@@ -62,9 +79,12 @@ static inline int get_ffmpeg_scale_type(enum video_scale_type type)
static inline const int *get_ffmpeg_coeffs(enum video_colorspace cs)
{
switch (cs) {
case VIDEO_CS_DEFAULT: return sws_getCoefficients(SWS_CS_ITU601);
case VIDEO_CS_601: return sws_getCoefficients(SWS_CS_ITU601);
case VIDEO_CS_709: return sws_getCoefficients(SWS_CS_ITU709);
case VIDEO_CS_DEFAULT:
return sws_getCoefficients(SWS_CS_ITU601);
case VIDEO_CS_601:
return sws_getCoefficients(SWS_CS_ITU601);
case VIDEO_CS_709:
return sws_getCoefficients(SWS_CS_ITU709);
}
return sws_getCoefficients(SWS_CS_ITU601);
@@ -73,58 +93,59 @@ static inline const int *get_ffmpeg_coeffs(enum video_colorspace cs)
static inline int get_ffmpeg_range_type(enum video_range_type type)
{
switch (type) {
case VIDEO_RANGE_DEFAULT: return 0;
case VIDEO_RANGE_PARTIAL: return 0;
case VIDEO_RANGE_FULL: return 1;
case VIDEO_RANGE_DEFAULT:
return 0;
case VIDEO_RANGE_PARTIAL:
return 0;
case VIDEO_RANGE_FULL:
return 1;
}
return 0;
}
#define FIXED_1_0 (1<<16)
#define FIXED_1_0 (1 << 16)
int video_scaler_create(video_scaler_t **scaler_out,
const struct video_scale_info *dst,
const struct video_scale_info *src,
enum video_scale_type type)
const struct video_scale_info *dst,
const struct video_scale_info *src,
enum video_scale_type type)
{
enum AVPixelFormat format_src = get_ffmpeg_video_format(src->format);
enum AVPixelFormat format_dst = get_ffmpeg_video_format(dst->format);
int scale_type = get_ffmpeg_scale_type(type);
const int *coeff_src = get_ffmpeg_coeffs(src->colorspace);
const int *coeff_dst = get_ffmpeg_coeffs(dst->colorspace);
int range_src = get_ffmpeg_range_type(src->range);
int range_dst = get_ffmpeg_range_type(dst->range);
int scale_type = get_ffmpeg_scale_type(type);
const int *coeff_src = get_ffmpeg_coeffs(src->colorspace);
const int *coeff_dst = get_ffmpeg_coeffs(dst->colorspace);
int range_src = get_ffmpeg_range_type(src->range);
int range_dst = get_ffmpeg_range_type(dst->range);
struct video_scaler *scaler;
int ret;
if (!scaler_out)
return VIDEO_SCALER_FAILED;
if (format_src == AV_PIX_FMT_NONE ||
format_dst == AV_PIX_FMT_NONE)
if (format_src == AV_PIX_FMT_NONE || format_dst == AV_PIX_FMT_NONE)
return VIDEO_SCALER_BAD_CONVERSION;
scaler = bzalloc(sizeof(struct video_scaler));
scaler->src_height = src->height;
scaler->swscale = sws_getCachedContext(NULL,
src->width, src->height, format_src,
dst->width, dst->height, format_dst,
scale_type, NULL, NULL, NULL);
scaler->swscale = sws_getCachedContext(NULL, src->width, src->height,
format_src, dst->width,
dst->height, format_dst,
scale_type, NULL, NULL, NULL);
if (!scaler->swscale) {
blog(LOG_ERROR, "video_scaler_create: Could not create "
"swscale");
"swscale");
goto fail;
}
ret = sws_setColorspaceDetails(scaler->swscale,
coeff_src, range_src,
coeff_dst, range_dst,
0, FIXED_1_0, FIXED_1_0);
ret = sws_setColorspaceDetails(scaler->swscale, coeff_src, range_src,
coeff_dst, range_dst, 0, FIXED_1_0,
FIXED_1_0);
if (ret < 0) {
blog(LOG_DEBUG, "video_scaler_create: "
"sws_setColorspaceDetails failed, ignoring");
"sws_setColorspaceDetails failed, ignoring");
}
*scaler_out = scaler;
@@ -143,20 +164,20 @@ void video_scaler_destroy(video_scaler_t *scaler)
}
}
bool video_scaler_scale(video_scaler_t *scaler,
uint8_t *output[], const uint32_t out_linesize[],
const uint8_t *const input[], const uint32_t in_linesize[])
bool video_scaler_scale(video_scaler_t *scaler, uint8_t *output[],
const uint32_t out_linesize[],
const uint8_t *const input[],
const uint32_t in_linesize[])
{
if (!scaler)
return false;
int ret = sws_scale(scaler->swscale,
input, (const int *)in_linesize,
0, scaler->src_height,
output, (const int *)out_linesize);
int ret = sws_scale(scaler->swscale, input, (const int *)in_linesize, 0,
scaler->src_height, output,
(const int *)out_linesize);
if (ret <= 0) {
blog(LOG_ERROR, "video_scaler_scale: sws_scale failed: %d",
ret);
ret);
return false;
}


@@ -27,19 +27,20 @@ extern "C" {
struct video_scaler;
typedef struct video_scaler video_scaler_t;
#define VIDEO_SCALER_SUCCESS 0
#define VIDEO_SCALER_SUCCESS 0
#define VIDEO_SCALER_BAD_CONVERSION -1
#define VIDEO_SCALER_FAILED -2
#define VIDEO_SCALER_FAILED -2
EXPORT int video_scaler_create(video_scaler_t **scaler,
const struct video_scale_info *dst,
const struct video_scale_info *src,
enum video_scale_type type);
const struct video_scale_info *dst,
const struct video_scale_info *src,
enum video_scale_type type);
EXPORT void video_scaler_destroy(video_scaler_t *scaler);
EXPORT bool video_scaler_scale(video_scaler_t *scaler,
uint8_t *output[], const uint32_t out_linesize[],
const uint8_t *const input[], const uint32_t in_linesize[]);
EXPORT bool video_scaler_scale(video_scaler_t *scaler, uint8_t *output[],
const uint32_t out_linesize[],
const uint8_t *const input[],
const uint32_t in_linesize[]);
#ifdef __cplusplus
}