Cleanup some warnings with ffmpeg/alffplay
This commit is contained in:
parent b12cd77d32
commit 476e9d7522
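The core of the cleanup is visible in the first two hunks: the FFmpeg headers are included inside a GCC diagnostic push/pop region that ignores -Wconversion and -Wold-style-cast, and the AV_NOPTS_VALUE and AVERROR_EOF macros (which expand to old-style casts) are captured once as constexpr constants while those warnings are suppressed, so later code compares against the constants instead of re-expanding the macros. A minimal sketch of the pattern, for illustration only (header set reduced; not part of the commit):

extern "C" {
#ifdef __GNUC__
_Pragma("GCC diagnostic push")
_Pragma("GCC diagnostic ignored \"-Wconversion\"")
_Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
#endif
#include "libavcodec/avcodec.h"

/* Expand the warning-prone macros once, while diagnostics are ignored. */
constexpr auto AVNoPtsValue = AV_NOPTS_VALUE;
constexpr auto AVErrorEOF = AVERROR_EOF;

#ifdef __GNUC__
_Pragma("GCC diagnostic pop")
#endif
} // extern "C"

/* Use sites then stay warning-free:
 *   if(frame->best_effort_timestamp != AVNoPtsValue) ...
 *   if(ret == AVErrorEOF) break;
 */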
@@ -28,6 +28,11 @@
 #include <ratio>

 extern "C" {
+#ifdef __GNUC__
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wconversion\"")
+_Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
+#endif
 #include "libavcodec/avcodec.h"
 #include "libavformat/avformat.h"
 #include "libavformat/avio.h"
@ -45,7 +50,13 @@ extern "C" {
|
||||
#include "libswscale/swscale.h"
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
constexpr auto AVNoPtsValue = AV_NOPTS_VALUE;
|
||||
constexpr auto AVErrorEOF = AVERROR_EOF;
|
||||
|
||||
struct SwsContext;
|
||||
#ifdef __GNUC__
|
||||
_Pragma("GCC diagnostic pop")
|
||||
#endif
|
||||
}
|
||||
|
||||
#include "SDL.h"
|
||||
@@ -111,6 +122,7 @@ using microseconds = std::chrono::microseconds;
 using milliseconds = std::chrono::milliseconds;
 using seconds = std::chrono::seconds;
 using seconds_d64 = std::chrono::duration<double>;
+using std::chrono::duration_cast;

 const std::string AppName{"alffplay"};

@@ -503,8 +515,8 @@ nanoseconds AudioState::getClockNoLock()

         nanoseconds pts{};
         if(status == AL_PLAYING || status == AL_PAUSED)
-            pts = mDeviceStartTime + std::chrono::duration_cast<nanoseconds>(
-                fixed32{offset[0] / mCodecCtx->sample_rate}) - nanoseconds{offset[1]};
+            pts = mDeviceStartTime - nanoseconds{offset[1]} +
+                duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
         else
         {
             /* If the source is stopped, the pts of the next sample to be heard
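For readers following the clock math: offset[0] here is a 32.32 fixed-point sample count (per the AL_SOFT_source_latency query used by the surrounding code), so dividing it by the sample rate yields 32.32 fixed-point seconds, which duration_cast then turns into nanoseconds. fixed32 is defined elsewhere in alffplay; the definition and numbers below are assumptions for illustration, not part of this diff:

#include <chrono>
#include <cstdint>
#include <ratio>

// Assumed definition, matching the 1/2^32-second tick the math above implies:
using fixed32 = std::chrono::duration<int64_t, std::ratio<1, (1ll<<32)>>;

// Worked example: 72000 samples at 48000 Hz is 1.5 seconds.
//   int64_t offset0 = int64_t{72000} << 32;          // 32.32 fixed-point samples
//   offset0 / 48000                                   // 1.5 s in 32.32 fixed point
//   duration_cast<nanoseconds>(fixed32{offset0 / 48000}).count() == 1'500'000'000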
@@ -562,8 +574,7 @@ nanoseconds AudioState::getClockNoLock()
     if(status != AL_STOPPED)
     {
         pts -= AudioBufferTime*queued;
-        pts += std::chrono::duration_cast<nanoseconds>(
-            fixed32{offset[0] / mCodecCtx->sample_rate});
+        pts += duration_cast<nanoseconds>(fixed32{offset[0] / mCodecCtx->sample_rate});
     }
     /* Don't offset by the latency if the source isn't playing. */
     if(status == AL_PLAYING)
@@ -604,7 +615,7 @@ bool AudioState::startPlayback()
         int64_t srctimes[2]{0,0};
         alGetSourcei64vSOFT(mSource, AL_SAMPLE_OFFSET_CLOCK_SOFT, srctimes);
         auto device_time = nanoseconds{srctimes[1]};
-        auto src_offset = std::chrono::duration_cast<nanoseconds>(fixed32{srctimes[0]}) /
+        auto src_offset = duration_cast<nanoseconds>(fixed32{srctimes[0]}) /
             mCodecCtx->sample_rate;

         /* The mixer may have ticked and incremented the device time and sample
@@ -651,7 +662,7 @@ int AudioState::getSync()

     /* Constrain the per-update difference to avoid exceedingly large skips */
     diff = std::min<nanoseconds>(diff, AudioSampleCorrectionMax);
-    return static_cast<int>(std::chrono::duration_cast<seconds>(diff*mCodecCtx->sample_rate).count());
+    return static_cast<int>(duration_cast<seconds>(diff*mCodecCtx->sample_rate).count());
 }

 int AudioState::decodeFrame()
@@ -663,7 +674,7 @@ int AudioState::decodeFrame()
             mPackets.sendTo(mCodecCtx.get());
         if(ret != 0)
         {
-            if(ret == AVERROR_EOF) break;
+            if(ret == AVErrorEOF) break;
             std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
             continue;
         }
@@ -672,10 +683,9 @@ int AudioState::decodeFrame()
             continue;

         /* If provided, update w/ pts */
-        if(mDecodedFrame->best_effort_timestamp != AV_NOPTS_VALUE)
-            mCurrentPts = std::chrono::duration_cast<nanoseconds>(
-                seconds_d64{av_q2d(mStream->time_base)*mDecodedFrame->best_effort_timestamp}
-            );
+        if(mDecodedFrame->best_effort_timestamp != AVNoPtsValue)
+            mCurrentPts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
+                static_cast<double>(mDecodedFrame->best_effort_timestamp)});

         if(mDecodedFrame->nb_samples > mSamplesMax)
         {
@@ -819,7 +829,7 @@ void AudioState::readAudio(int sample_skip)
                 if(woffset == mBufferDataSize)
                     woffset = 0;
                 mWritePos.store(woffset, std::memory_order_release);
-                mSamplesPos += rem;
+                mSamplesPos += static_cast<int>(rem);
                 mCurrentPts += nanoseconds{seconds{rem}} / mCodecCtx->sample_rate;
                 continue;
             }
@@ -1225,13 +1235,13 @@ int AudioState::handler()
         {
             fprintf(stderr, "Failed to set buffer callback\n");
             alSourcei(mSource, AL_BUFFER, 0);
-            buffer_len = static_cast<int>(std::chrono::duration_cast<seconds>(
-                mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize);
+            buffer_len = static_cast<int>(duration_cast<seconds>(mCodecCtx->sample_rate *
+                AudioBufferTime).count() * mFrameSize);
         }
         else
         {
-            mBufferDataSize = static_cast<size_t>(std::chrono::duration_cast<seconds>(
-                mCodecCtx->sample_rate * AudioBufferTotalTime).count()) * mFrameSize;
+            mBufferDataSize = static_cast<size_t>(duration_cast<seconds>(mCodecCtx->sample_rate *
+                AudioBufferTotalTime).count()) * mFrameSize;
             mBufferData.reset(new uint8_t[mBufferDataSize]);
             mReadPos.store(0, std::memory_order_relaxed);
             mWritePos.store(0, std::memory_order_relaxed);
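The buffer-length expression above is easier to verify with concrete numbers: multiplying the sample rate (frames per second) by a chrono duration gives a duration whose seconds-count equals the number of sample frames in that span, and multiplying by the bytes-per-frame size then gives bytes. The rate, buffer time, and frame size below are assumptions for illustration, not values taken from this commit:

#include <chrono>
#include <cstdio>

int main()
{
    using std::chrono::duration_cast;
    using std::chrono::milliseconds;
    using std::chrono::seconds;

    constexpr int sample_rate{48000};             // assumed stream rate
    constexpr milliseconds AudioBufferTime{20};   // assumed per-buffer length
    constexpr int frame_size{4};                  // e.g. 16-bit stereo

    /* 48000/s * 20ms = 960000ms; cast to seconds -> count() == 960 frames. */
    const int buffer_len{static_cast<int>(duration_cast<seconds>(
        sample_rate * AudioBufferTime).count() * frame_size)};

    std::printf("buffer_len = %d bytes (960 frames * 4 bytes)\n", buffer_len);
    return 0;
}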
@@ -1243,15 +1253,15 @@ int AudioState::handler()
     }
     else
 #endif
-        buffer_len = static_cast<int>(std::chrono::duration_cast<seconds>(
-            mCodecCtx->sample_rate * AudioBufferTime).count() * mFrameSize);
+        buffer_len = static_cast<int>(duration_cast<seconds>(mCodecCtx->sample_rate *
+            AudioBufferTime).count() * mFrameSize);
     if(buffer_len > 0)
         samples = av_malloc(static_cast<ALuint>(buffer_len));

     /* Prefill the codec buffer. */
     do {
         const int ret{mPackets.sendTo(mCodecCtx.get())};
-        if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+        if(ret == AVERROR(EAGAIN) || ret == AVErrorEOF)
             break;
     } while(1);

@@ -1309,7 +1319,7 @@ int AudioState::handler()
         if(!got_audio) break;

         const ALuint bufid{mBuffers[mBufferIdx]};
-        mBufferIdx = (mBufferIdx+1) % mBuffers.size();
+        mBufferIdx = static_cast<ALuint>((mBufferIdx+1) % mBuffers.size());

         alBufferData(bufid, mFormat, samples, buffer_len, mCodecCtx->sample_rate);
         alSourceQueueBuffers(mSource, 1, &bufid);
@@ -1575,7 +1585,7 @@ int VideoState::handler()
     /* Prefill the codec buffer. */
     do {
         const int ret{mPackets.sendTo(mCodecCtx.get())};
-        if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+        if(ret == AVERROR(EAGAIN) || ret == AVErrorEOF)
             break;
     } while(1);

@@ -1597,21 +1607,21 @@ int VideoState::handler()
             mPackets.sendTo(mCodecCtx.get());
         if(ret != 0)
         {
-            if(ret == AVERROR_EOF) break;
+            if(ret == AVErrorEOF) break;
             std::cerr<< "Failed to receive frame: "<<ret <<std::endl;
             continue;
         }

         /* Get the PTS for this frame. */
-        if(decoded_frame->best_effort_timestamp != AV_NOPTS_VALUE)
-            current_pts = std::chrono::duration_cast<nanoseconds>(
-                seconds_d64{av_q2d(mStream->time_base)*decoded_frame->best_effort_timestamp});
+        if(decoded_frame->best_effort_timestamp != AVNoPtsValue)
+            current_pts = duration_cast<nanoseconds>(seconds_d64{av_q2d(mStream->time_base) *
+                static_cast<double>(decoded_frame->best_effort_timestamp)});
         vp->mPts = current_pts;

         /* Update the video clock to the next expected PTS. */
         auto frame_delay = av_q2d(mCodecCtx->time_base);
         frame_delay += decoded_frame->repeat_pict * (frame_delay * 0.5);
-        current_pts += std::chrono::duration_cast<nanoseconds>(seconds_d64{frame_delay});
+        current_pts += duration_cast<nanoseconds>(seconds_d64{frame_delay});

         /* Put the frame in the queue to be loaded into a texture and displayed
          * by the rendering thread.
@@ -1839,7 +1849,6 @@ std::ostream &operator<<(std::ostream &os, const PrettyTime &rhs)
 {
     using hours = std::chrono::hours;
     using minutes = std::chrono::minutes;
-    using std::chrono::duration_cast;

     seconds t{rhs.mTime};
     if(t.count() < 0)