clang-format: Apply formatting

Code submissions have continually suffered from formatting
inconsistencies that have to be addressed by hand in review.  Using
clang-format makes code formatting consistent and allows it to be
automated, so that maintainers can focus on the code itself rather
than on its formatting.
Author: jp9000
Date: 2019-06-22 22:13:45 -07:00
Parent: 53615ee10f
Commit: f53df7da64
567 changed files with 34068 additions and 32903 deletions
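The project's .clang-format configuration file is not part of the hunks shown below, so the following is only a rough sketch, under stated assumptions, of the kind of configuration that would produce the reformatted lines in this diff (tab indentation, an 80-column limit, Linux-style braces, continuation arguments aligned under the opening parenthesis). The option values are assumptions inferred from the visible output, not taken from this commit.

# Hypothetical .clang-format sketch; not the file added by this commit.
# Every value below is an assumption inferred from the formatting visible in the diff.
BasedOnStyle: LLVM
IndentWidth: 8
TabWidth: 8
UseTab: ForContinuationAndIndentation
ColumnLimit: 80
BreakBeforeBraces: Linux
AlignAfterOpenBracket: Align

With such a file at the repository root, the formatting can be applied by running clang-format -i on each source file (or git clang-format on staged changes), and the same check can be run in CI so that new submissions arrive already formatted.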


@@ -65,12 +65,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <intrin.h>
#define do_log(level, format, ...) \
blog(level, "[qsv encoder: '%s'] " format, \
"msdk_impl", ##__VA_ARGS__)
blog(level, "[qsv encoder: '%s'] " format, "msdk_impl", ##__VA_ARGS__)
mfxIMPL impl = MFX_IMPL_HARDWARE_ANY;
mfxVersion ver = {{0, 1}}; // for backward compatibility
std::atomic<bool> is_active{false};
mfxIMPL impl = MFX_IMPL_HARDWARE_ANY;
mfxVersion ver = {{0, 1}}; // for backward compatibility
std::atomic<bool> is_active{false};
void qsv_encoder_version(unsigned short *major, unsigned short *minor)
{
@@ -86,62 +85,77 @@ qsv_t *qsv_encoder_open(qsv_param_t *pParams)
mfxStatus sts = pEncoder->Open(pParams);
if (sts != MFX_ERR_NONE) {
#define WARN_ERR_IMPL(err, str, err_name) case err: \
do_log(LOG_WARNING, str " (" err_name ")"); break;
#define WARN_ERR_IMPL(err, str, err_name) \
case err: \
do_log(LOG_WARNING, str " (" err_name ")"); \
break;
#define WARN_ERR(err, str) WARN_ERR_IMPL(err, str, #err)
switch (sts) {
WARN_ERR(MFX_ERR_UNKNOWN, "Unknown QSV error");
WARN_ERR(MFX_ERR_NOT_INITIALIZED,
"Member functions called without initialization");
WARN_ERR(MFX_ERR_INVALID_HANDLE,
"Invalid session or MemId handle");
WARN_ERR(MFX_ERR_NULL_PTR,
"NULL pointer in the input or output arguments");
WARN_ERR(MFX_ERR_UNDEFINED_BEHAVIOR, "Undefined behavior");
WARN_ERR(MFX_ERR_NOT_ENOUGH_BUFFER,
"Insufficient buffer for input or output.");
WARN_ERR(MFX_ERR_NOT_FOUND,
"Specified object/item/sync point not found.");
WARN_ERR(MFX_ERR_MEMORY_ALLOC, "Failed to allocate memory");
WARN_ERR(MFX_ERR_LOCK_MEMORY,
"failed to lock the memory block "
"(external allocator).");
WARN_ERR(MFX_ERR_UNSUPPORTED,
"Unsupported configurations, parameters, or features");
WARN_ERR(MFX_ERR_INVALID_VIDEO_PARAM,
"Incompatible video parameters detected");
WARN_ERR(MFX_WRN_VIDEO_PARAM_CHANGED,
"The decoder detected a new sequence header in the "
"bitstream. Video parameters may have changed.");
WARN_ERR(MFX_WRN_VALUE_NOT_CHANGED,
"The parameter has been clipped to its value range");
WARN_ERR(MFX_WRN_OUT_OF_RANGE,
"The parameter is out of valid value range");
WARN_ERR(MFX_WRN_INCOMPATIBLE_VIDEO_PARAM,
"Incompatible video parameters detected");
WARN_ERR(MFX_WRN_FILTER_SKIPPED,
"The SDK VPP has skipped one or more optional filters "
"requested by the application");
WARN_ERR(MFX_ERR_ABORTED, "The asynchronous operation aborted");
WARN_ERR(MFX_ERR_MORE_DATA,
"Need more bitstream at decoding input, encoding "
"input, or video processing input frames");
WARN_ERR(MFX_ERR_MORE_SURFACE, "Need more frame surfaces at "
"decoding or video processing output");
WARN_ERR(MFX_ERR_MORE_BITSTREAM,
"Need more bitstream buffers at the encoding output");
WARN_ERR(MFX_WRN_IN_EXECUTION,
"Synchronous operation still running");
WARN_ERR(MFX_ERR_DEVICE_FAILED,
"Hardware device returned unexpected errors");
WARN_ERR(MFX_ERR_DEVICE_LOST,"Hardware device was lost");
WARN_ERR(MFX_WRN_DEVICE_BUSY,
"Hardware device is currently busy");
WARN_ERR(MFX_WRN_PARTIAL_ACCELERATION,
"The hardware does not support the specified "
"configuration. Encoding, decoding, or video "
"processing may be partially accelerated");
WARN_ERR(MFX_ERR_UNKNOWN, "Unknown QSV error");
WARN_ERR(
MFX_ERR_NOT_INITIALIZED,
"Member functions called without initialization");
WARN_ERR(MFX_ERR_INVALID_HANDLE,
"Invalid session or MemId handle");
WARN_ERR(
MFX_ERR_NULL_PTR,
"NULL pointer in the input or output arguments");
WARN_ERR(MFX_ERR_UNDEFINED_BEHAVIOR,
"Undefined behavior");
WARN_ERR(MFX_ERR_NOT_ENOUGH_BUFFER,
"Insufficient buffer for input or output.");
WARN_ERR(MFX_ERR_NOT_FOUND,
"Specified object/item/sync point not found.");
WARN_ERR(MFX_ERR_MEMORY_ALLOC,
"Gailed to allocate memory");
WARN_ERR(MFX_ERR_LOCK_MEMORY,
"failed to lock the memory block "
"(external allocator).");
WARN_ERR(
MFX_ERR_UNSUPPORTED,
"Unsupported configurations, parameters, or features");
WARN_ERR(MFX_ERR_INVALID_VIDEO_PARAM,
"Incompatible video parameters detected");
WARN_ERR(
MFX_WRN_VIDEO_PARAM_CHANGED,
"The decoder detected a new sequence header in the "
"bitstream. Video parameters may have changed.");
WARN_ERR(
MFX_WRN_VALUE_NOT_CHANGED,
"The parameter has been clipped to its value range");
WARN_ERR(MFX_WRN_OUT_OF_RANGE,
"The parameter is out of valid value range");
WARN_ERR(MFX_WRN_INCOMPATIBLE_VIDEO_PARAM,
"Incompatible video parameters detected");
WARN_ERR(
MFX_WRN_FILTER_SKIPPED,
"The SDK VPP has skipped one or more optional filters "
"requested by the application");
WARN_ERR(MFX_ERR_ABORTED,
"The asynchronous operation aborted");
WARN_ERR(
MFX_ERR_MORE_DATA,
"Need more bitstream at decoding input, encoding "
"input, or video processing input frames");
WARN_ERR(MFX_ERR_MORE_SURFACE,
"Need more frame surfaces at "
"decoding or video processing output");
WARN_ERR(
MFX_ERR_MORE_BITSTREAM,
"Need more bitstream buffers at the encoding output");
WARN_ERR(MFX_WRN_IN_EXECUTION,
"Synchronous operation still running");
WARN_ERR(MFX_ERR_DEVICE_FAILED,
"Hardware device returned unexpected errors");
WARN_ERR(MFX_ERR_DEVICE_LOST,
"Hardware device was lost");
WARN_ERR(MFX_WRN_DEVICE_BUSY,
"Hardware device is currently busy");
WARN_ERR(MFX_WRN_PARTIAL_ACCELERATION,
"The hardware does not support the specified "
"configuration. Encoding, decoding, or video "
"processing may be partially accelerated");
}
#undef WARN_ERR
@@ -153,11 +167,11 @@ qsv_t *qsv_encoder_open(qsv_param_t *pParams)
return NULL;
}
return (qsv_t *) pEncoder;
return (qsv_t *)pEncoder;
}
int qsv_encoder_headers(qsv_t *pContext, uint8_t **pSPS, uint8_t **pPPS,
uint16_t *pnSPS, uint16_t *pnPPS)
uint16_t *pnSPS, uint16_t *pnPPS)
{
QSV_Encoder_Internal *pEncoder = (QSV_Encoder_Internal *)pContext;
pEncoder->GetSPSPPS(pSPS, pPPS, pnSPS, pnPPS);
@@ -165,16 +179,16 @@ int qsv_encoder_headers(qsv_t *pContext, uint8_t **pSPS, uint8_t **pPPS,
return 0;
}
int qsv_encoder_encode(qsv_t * pContext, uint64_t ts, uint8_t *pDataY,
uint8_t *pDataUV, uint32_t strideY, uint32_t strideUV,
mfxBitstream **pBS)
int qsv_encoder_encode(qsv_t *pContext, uint64_t ts, uint8_t *pDataY,
uint8_t *pDataUV, uint32_t strideY, uint32_t strideUV,
mfxBitstream **pBS)
{
QSV_Encoder_Internal *pEncoder = (QSV_Encoder_Internal *)pContext;
mfxStatus sts = MFX_ERR_NONE;
if (pDataY != NULL && pDataUV != NULL)
sts = pEncoder->Encode(ts, pDataY, pDataUV, strideY, strideUV,
pBS);
pBS);
if (sts == MFX_ERR_NONE)
return 0;
@@ -232,9 +246,9 @@ enum qsv_cpu_platform qsv_get_cpu_platform()
__cpuid(cpuInfo, 0);
string vendor;
vendor += string((char*)&cpuInfo[1], 4);
vendor += string((char*)&cpuInfo[3], 4);
vendor += string((char*)&cpuInfo[2], 4);
vendor += string((char *)&cpuInfo[1], 4);
vendor += string((char *)&cpuInfo[3], 4);
vendor += string((char *)&cpuInfo[2], 4);
if (vendor != "GenuineIntel")
return QSV_CPU_PLATFORM_UNKNOWN;
@@ -248,8 +262,7 @@ enum qsv_cpu_platform qsv_get_cpu_platform()
if (family != 6)
return QSV_CPU_PLATFORM_UNKNOWN;
switch (model)
{
switch (model) {
case 0x1C:
case 0x26:
case 0x27:


@@ -70,33 +70,16 @@ struct qsv_rate_control_info {
};
static const struct qsv_rate_control_info qsv_ratecontrols[] = {
{"CBR", false},
{"VBR", false},
{"VCM", true},
{"CQP", false},
{"AVBR", false},
{"ICQ", true},
{"LA_ICQ", true},
{"LA", true},
{0, false}
};
static const char * const qsv_profile_names[] = {
"high",
"main",
"baseline",
0
};
static const char * const qsv_usage_names[] = {
"quality",
"balanced",
"speed",
0
};
{"CBR", false}, {"VBR", false}, {"VCM", true},
{"CQP", false}, {"AVBR", false}, {"ICQ", true},
{"LA_ICQ", true}, {"LA", true}, {0, false}};
static const char *const qsv_profile_names[] = {"high", "main", "baseline", 0};
static const char *const qsv_usage_names[] = {"quality", "balanced", "speed",
0};
typedef struct qsv_t qsv_t;
typedef struct
{
typedef struct {
mfxU16 nTargetUsage; /* 1 through 7, 1 being best quality and 7
being the best speed */
mfxU16 nWidth; /* source picture width */
@@ -134,14 +117,14 @@ int qsv_encoder_close(qsv_t *);
int qsv_param_parse(qsv_param_t *, const char *name, const char *value);
int qsv_param_apply_profile(qsv_param_t *, const char *profile);
int qsv_param_default_preset(qsv_param_t *, const char *preset,
const char *tune);
const char *tune);
int qsv_encoder_reconfig(qsv_t *, qsv_param_t *);
void qsv_encoder_version(unsigned short *major, unsigned short *minor);
qsv_t *qsv_encoder_open( qsv_param_t * );
qsv_t *qsv_encoder_open(qsv_param_t *);
int qsv_encoder_encode(qsv_t *, uint64_t, uint8_t *, uint8_t *, uint32_t,
uint32_t, mfxBitstream **pBS);
uint32_t, mfxBitstream **pBS);
int qsv_encoder_headers(qsv_t *, uint8_t **pSPS, uint8_t **pPPS,
uint16_t *pnSPS, uint16_t *pnPPS);
uint16_t *pnSPS, uint16_t *pnPPS);
enum qsv_cpu_platform qsv_get_cpu_platform();
#ifdef __cplusplus


@@ -62,26 +62,25 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <obs-module.h>
#define do_log(level, format, ...) \
blog(level, "[qsv encoder: '%s'] " format, \
"msdk_impl", ##__VA_ARGS__)
blog(level, "[qsv encoder: '%s'] " format, "msdk_impl", ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
mfxHDL QSV_Encoder_Internal::g_DX_Handle = NULL;
mfxU16 QSV_Encoder_Internal::g_numEncodersOpen = 0;
QSV_Encoder_Internal::QSV_Encoder_Internal(mfxIMPL& impl, mfxVersion& version) :
m_pmfxSurfaces(NULL),
m_pmfxENC(NULL),
m_nSPSBufferSize(100),
m_nPPSBufferSize(100),
m_nTaskPool(0),
m_pTaskPool(NULL),
m_nTaskIdx(0),
m_nFirstSyncTask(0),
m_outBitstream()
QSV_Encoder_Internal::QSV_Encoder_Internal(mfxIMPL &impl, mfxVersion &version)
: m_pmfxSurfaces(NULL),
m_pmfxENC(NULL),
m_nSPSBufferSize(100),
m_nPPSBufferSize(100),
m_nTaskPool(0),
m_pTaskPool(NULL),
m_nTaskIdx(0),
m_nFirstSyncTask(0),
m_outBitstream()
{
mfxIMPL tempImpl;
mfxStatus sts;
@@ -103,17 +102,16 @@ QSV_Encoder_Internal::QSV_Encoder_Internal(mfxIMPL& impl, mfxVersion& version) :
m_bUseD3D11 = true;
if (m_bUseD3D11)
blog(LOG_INFO, "\timpl: D3D11\n"
"\tsurf: D3D11");
"\tsurf: D3D11");
else
blog(LOG_INFO, "\timpl: D3D11\n"
"\tsurf: SysMem");
"\tsurf: SysMem");
m_impl = tempImpl;
m_ver = version;
return;
}
}
else if (m_bD3D9HACK) {
} else if (m_bD3D9HACK) {
tempImpl = impl | MFX_IMPL_VIA_D3D9;
sts = m_session.Init(tempImpl, &version);
if (sts == MFX_ERR_NONE) {
@@ -137,12 +135,11 @@ QSV_Encoder_Internal::QSV_Encoder_Internal(mfxIMPL& impl, mfxVersion& version) :
m_session.Close();
blog(LOG_INFO, "\timpl: D3D09\n"
"\tsurf: SysMem");
"\tsurf: SysMem");
m_impl = tempImpl;
m_ver = version;
}
}
QSV_Encoder_Internal::~QSV_Encoder_Internal()
@@ -151,20 +148,21 @@ QSV_Encoder_Internal::~QSV_Encoder_Internal()
ClearData();
}
mfxStatus QSV_Encoder_Internal::Open(qsv_param_t * pParams)
mfxStatus QSV_Encoder_Internal::Open(qsv_param_t *pParams)
{
mfxStatus sts = MFX_ERR_NONE;
if (m_bUseD3D11)
// Use D3D11 surface
sts = Initialize(m_impl, m_ver, &m_session, &m_mfxAllocator, &g_DX_Handle, false, false);
sts = Initialize(m_impl, m_ver, &m_session, &m_mfxAllocator,
&g_DX_Handle, false, false);
else if (m_bD3D9HACK)
// Use hack
sts = Initialize(m_impl, m_ver, &m_session, &m_mfxAllocator, &g_DX_Handle, false, true);
sts = Initialize(m_impl, m_ver, &m_session, &m_mfxAllocator,
&g_DX_Handle, false, true);
else
sts = Initialize(m_impl, m_ver, &m_session, NULL);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
m_pmfxENC = new MFXVideoENCODE(m_session);
@@ -193,8 +191,7 @@ mfxStatus QSV_Encoder_Internal::Open(qsv_param_t * pParams)
return sts;
}
bool QSV_Encoder_Internal::InitParams(qsv_param_t * pParams)
bool QSV_Encoder_Internal::InitParams(qsv_param_t *pParams)
{
memset(&m_mfxEncParams, 0, sizeof(m_mfxEncParams));
@@ -248,10 +245,11 @@ bool QSV_Encoder_Internal::InitParams(qsv_param_t * pParams)
}
m_mfxEncParams.AsyncDepth = pParams->nAsyncDepth;
m_mfxEncParams.mfx.GopPicSize = (mfxU16)(pParams->nKeyIntSec *
pParams->nFpsNum / (float)pParams->nFpsDen);
m_mfxEncParams.mfx.GopPicSize =
(mfxU16)(pParams->nKeyIntSec * pParams->nFpsNum /
(float)pParams->nFpsDen);
static mfxExtBuffer* extendedBuffers[2];
static mfxExtBuffer *extendedBuffers[2];
int iBuffers = 0;
if (pParams->nAsyncDepth == 1) {
m_mfxEncParams.mfx.NumRefFrame = 1;
@@ -261,9 +259,8 @@ bool QSV_Encoder_Internal::InitParams(qsv_param_t * pParams)
m_co.Header.BufferId = MFX_EXTBUFF_CODING_OPTION;
m_co.Header.BufferSz = sizeof(mfxExtCodingOption);
m_co.MaxDecFrameBuffering = 1;
extendedBuffers[iBuffers++] = (mfxExtBuffer*)&m_co;
}
else
extendedBuffers[iBuffers++] = (mfxExtBuffer *)&m_co;
} else
m_mfxEncParams.mfx.GopRefDist = pParams->nbFrames + 1;
if (pParams->nRateControl == MFX_RATECONTROL_LA_ICQ ||
@@ -273,7 +270,7 @@ bool QSV_Encoder_Internal::InitParams(qsv_param_t * pParams)
m_co2.Header.BufferId = MFX_EXTBUFF_CODING_OPTION;
m_co2.Header.BufferSz = sizeof(m_co2);
m_co2.LookAheadDepth = pParams->nLADEPTH;
extendedBuffers[iBuffers++] = (mfxExtBuffer*)& m_co2;
extendedBuffers[iBuffers++] = (mfxExtBuffer *)&m_co2;
}
if (iBuffers > 0) {
@@ -311,7 +308,7 @@ mfxStatus QSV_Encoder_Internal::AllocateSurfaces()
// Allocate required surfaces
if (m_bUseD3D11 || m_bD3D9HACK) {
sts = m_mfxAllocator.Alloc(m_mfxAllocator.pthis, &EncRequest,
&m_mfxResponse);
&m_mfxResponse);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
m_nSurfNum = m_mfxResponse.NumFrameActual;
@@ -323,15 +320,14 @@ mfxStatus QSV_Encoder_Internal::AllocateSurfaces()
m_pmfxSurfaces[i] = new mfxFrameSurface1;
memset(m_pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
memcpy(&(m_pmfxSurfaces[i]->Info),
&(m_mfxEncParams.mfx.FrameInfo),
sizeof(mfxFrameInfo));
&(m_mfxEncParams.mfx.FrameInfo),
sizeof(mfxFrameInfo));
m_pmfxSurfaces[i]->Data.MemId = m_mfxResponse.mids[i];
}
}
else {
} else {
mfxU16 width = (mfxU16)MSDK_ALIGN32(EncRequest.Info.Width);
mfxU16 height = (mfxU16)MSDK_ALIGN32(EncRequest.Info.Height);
mfxU8 bitsPerPixel = 12;
mfxU8 bitsPerPixel = 12;
mfxU32 surfaceSize = width * height * bitsPerPixel / 8;
m_nSurfNum = EncRequest.NumFrameSuggested;
@@ -340,13 +336,14 @@ mfxStatus QSV_Encoder_Internal::AllocateSurfaces()
m_pmfxSurfaces[i] = new mfxFrameSurface1;
memset(m_pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
memcpy(&(m_pmfxSurfaces[i]->Info),
&(m_mfxEncParams.mfx.FrameInfo),
sizeof(mfxFrameInfo));
&(m_mfxEncParams.mfx.FrameInfo),
sizeof(mfxFrameInfo));
mfxU8* pSurface = (mfxU8*) new mfxU8[surfaceSize];
mfxU8 *pSurface = (mfxU8 *)new mfxU8[surfaceSize];
m_pmfxSurfaces[i]->Data.Y = pSurface;
m_pmfxSurfaces[i]->Data.U = pSurface + width * height;
m_pmfxSurfaces[i]->Data.V = pSurface + width * height + 1;
m_pmfxSurfaces[i]->Data.V =
pSurface + width * height + 1;
m_pmfxSurfaces[i]->Data.Pitch = width;
}
}
@@ -364,8 +361,8 @@ mfxStatus QSV_Encoder_Internal::GetVideoParam()
opt.Header.BufferId = MFX_EXTBUFF_CODING_OPTION_SPSPPS;
opt.Header.BufferSz = sizeof(mfxExtCodingOptionSPSPPS);
static mfxExtBuffer* extendedBuffers[1];
extendedBuffers[0] = (mfxExtBuffer*)& opt;
static mfxExtBuffer *extendedBuffers[1];
extendedBuffers[0] = (mfxExtBuffer *)&opt;
m_parameter.ExtParam = extendedBuffers;
m_parameter.NumExtParam = 1;
@@ -384,7 +381,7 @@ mfxStatus QSV_Encoder_Internal::GetVideoParam()
}
void QSV_Encoder_Internal::GetSPSPPS(mfxU8 **pSPSBuf, mfxU8 **pPPSBuf,
mfxU16 *pnSPSBuf, mfxU16 *pnPPSBuf)
mfxU16 *pnSPSBuf, mfxU16 *pnPPSBuf)
{
*pSPSBuf = m_SPSBuffer;
*pPPSBuf = m_PPSBuffer;
@@ -409,7 +406,7 @@ mfxStatus QSV_Encoder_Internal::InitBitstream()
m_pTaskPool[i].mfxBS.DataLength = 0;
MSDK_CHECK_POINTER(m_pTaskPool[i].mfxBS.Data,
MFX_ERR_MEMORY_ALLOC);
MFX_ERR_MEMORY_ALLOC);
}
memset(&m_outBitstream, 0, sizeof(mfxBitstream));
@@ -424,21 +421,18 @@ mfxStatus QSV_Encoder_Internal::InitBitstream()
}
mfxStatus QSV_Encoder_Internal::LoadNV12(mfxFrameSurface1 *pSurface,
uint8_t *pDataY, uint8_t *pDataUV, uint32_t strideY,
uint32_t strideUV)
uint8_t *pDataY, uint8_t *pDataUV,
uint32_t strideY, uint32_t strideUV)
{
mfxU16 w, h, i, pitch;
mfxU8* ptr;
mfxFrameInfo* pInfo = &pSurface->Info;
mfxFrameData* pData = &pSurface->Data;
mfxU8 *ptr;
mfxFrameInfo *pInfo = &pSurface->Info;
mfxFrameData *pData = &pSurface->Data;
if (pInfo->CropH > 0 && pInfo->CropW > 0)
{
if (pInfo->CropH > 0 && pInfo->CropW > 0) {
w = pInfo->CropW;
h = pInfo->CropH;
}
else
{
} else {
w = pInfo->Width;
h = pInfo->Height;
}
@@ -460,7 +454,7 @@ mfxStatus QSV_Encoder_Internal::LoadNV12(mfxFrameSurface1 *pSurface,
return MFX_ERR_NONE;
}
int QSV_Encoder_Internal::GetFreeTaskIndex(Task* pTaskPool, mfxU16 nPoolSize)
int QSV_Encoder_Internal::GetFreeTaskIndex(Task *pTaskPool, mfxU16 nPoolSize)
{
if (pTaskPool)
for (int i = 0; i < nPoolSize; i++)
@@ -470,8 +464,8 @@ int QSV_Encoder_Internal::GetFreeTaskIndex(Task* pTaskPool, mfxU16 nPoolSize)
}
mfxStatus QSV_Encoder_Internal::Encode(uint64_t ts, uint8_t *pDataY,
uint8_t *pDataUV, uint32_t strideY, uint32_t strideUV,
mfxBitstream **pBS)
uint8_t *pDataUV, uint32_t strideY,
uint32_t strideUV, mfxBitstream **pBS)
{
mfxStatus sts = MFX_ERR_NONE;
*pBS = NULL;
@@ -492,13 +486,13 @@ mfxStatus QSV_Encoder_Internal::Encode(uint64_t ts, uint8_t *pDataY,
while (MFX_ERR_NOT_FOUND == nTaskIdx || MFX_ERR_NOT_FOUND == nSurfIdx) {
// No more free tasks or surfaces, need to sync
sts = m_session.SyncOperation(m_pTaskPool[m_nFirstSyncTask].syncp,
60000);
sts = m_session.SyncOperation(
m_pTaskPool[m_nFirstSyncTask].syncp, 60000);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
mfxU8 *pTemp = m_outBitstream.Data;
memcpy(&m_outBitstream, &m_pTaskPool[m_nFirstSyncTask].mfxBS,
sizeof(mfxBitstream));
sizeof(mfxBitstream));
m_pTaskPool[m_nFirstSyncTask].mfxBS.Data = pTemp;
m_pTaskPool[m_nFirstSyncTask].mfxBS.DataLength = 0;
@@ -527,7 +521,8 @@ mfxStatus QSV_Encoder_Internal::Encode(uint64_t ts, uint8_t *pDataY,
mfxFrameSurface1 *pSurface = m_pmfxSurfaces[nSurfIdx];
if (m_bUseD3D11 || m_bD3D9HACK) {
sts = m_mfxAllocator.Lock(m_mfxAllocator.pthis,
pSurface->Data.MemId, &(pSurface->Data));
pSurface->Data.MemId,
&(pSurface->Data));
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
}
@@ -537,22 +532,24 @@ mfxStatus QSV_Encoder_Internal::Encode(uint64_t ts, uint8_t *pDataY,
if (m_bUseD3D11 || m_bD3D9HACK) {
sts = m_mfxAllocator.Unlock(m_mfxAllocator.pthis,
pSurface->Data.MemId, &(pSurface->Data));
pSurface->Data.MemId,
&(pSurface->Data));
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
}
for (;;) {
// Encode a frame asynchronously (returns immediately)
sts = m_pmfxENC->EncodeFrameAsync(NULL, pSurface,
&m_pTaskPool[nTaskIdx].mfxBS,
&m_pTaskPool[nTaskIdx].syncp);
&m_pTaskPool[nTaskIdx].mfxBS,
&m_pTaskPool[nTaskIdx].syncp);
if (MFX_ERR_NONE < sts && !m_pTaskPool[nTaskIdx].syncp) {
// Repeat the call if warning and no output
if (MFX_WRN_DEVICE_BUSY == sts)
MSDK_SLEEP(1); // Wait if device is busy, then repeat the same call
MSDK_SLEEP(
1); // Wait if device is busy, then repeat the same call
} else if (MFX_ERR_NONE < sts && m_pTaskPool[nTaskIdx].syncp) {
sts = MFX_ERR_NONE; // Ignore warnings if output is available
sts = MFX_ERR_NONE; // Ignore warnings if output is available
break;
} else if (MFX_ERR_NOT_ENOUGH_BUFFER == sts) {
// Allocate more bitstream buffer memory here if needed...
@@ -569,7 +566,8 @@ mfxStatus QSV_Encoder_Internal::Drain()
mfxStatus sts = MFX_ERR_NONE;
while (m_pTaskPool && m_pTaskPool[m_nFirstSyncTask].syncp) {
sts = m_session.SyncOperation(m_pTaskPool[m_nFirstSyncTask].syncp, 60000);
sts = m_session.SyncOperation(
m_pTaskPool[m_nFirstSyncTask].syncp, 60000);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
m_pTaskPool[m_nFirstSyncTask].syncp = NULL;
@@ -584,8 +582,7 @@ mfxStatus QSV_Encoder_Internal::ClearData()
mfxStatus sts = MFX_ERR_NONE;
sts = Drain();
if (m_pmfxENC)
{
if (m_pmfxENC) {
sts = m_pmfxENC->Close();
delete m_pmfxENC;
m_pmfxENC = NULL;
@@ -610,8 +607,7 @@ mfxStatus QSV_Encoder_Internal::ClearData()
MSDK_SAFE_DELETE_ARRAY(m_pTaskPool);
}
if (m_outBitstream.Data)
{
if (m_outBitstream.Data) {
delete m_outBitstream.Data;
m_outBitstream.Data = NULL;
}


@@ -59,57 +59,57 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "QSV_Encoder.h"
#include "common_utils.h"
class QSV_Encoder_Internal
{
class QSV_Encoder_Internal {
public:
QSV_Encoder_Internal(mfxIMPL& impl, mfxVersion& version);
QSV_Encoder_Internal(mfxIMPL &impl, mfxVersion &version);
~QSV_Encoder_Internal();
mfxStatus Open(qsv_param_t * pParams);
void GetSPSPPS(mfxU8 **pSPSBuf, mfxU8 **pPPSBuf,
mfxU16 *pnSPSBuf, mfxU16 *pnPPSBuf);
mfxStatus Encode(uint64_t ts, uint8_t *pDataY, uint8_t *pDataUV,
uint32_t strideY, uint32_t strideUV, mfxBitstream
**pBS);
mfxStatus ClearData();
mfxStatus Reset(qsv_param_t *pParams);
mfxStatus Open(qsv_param_t *pParams);
void GetSPSPPS(mfxU8 **pSPSBuf, mfxU8 **pPPSBuf, mfxU16 *pnSPSBuf,
mfxU16 *pnPPSBuf);
mfxStatus Encode(uint64_t ts, uint8_t *pDataY, uint8_t *pDataUV,
uint32_t strideY, uint32_t strideUV,
mfxBitstream **pBS);
mfxStatus ClearData();
mfxStatus Reset(qsv_param_t *pParams);
protected:
bool InitParams(qsv_param_t * pParams);
mfxStatus AllocateSurfaces();
mfxStatus GetVideoParam();
mfxStatus InitBitstream();
mfxStatus LoadNV12(mfxFrameSurface1 *pSurface, uint8_t *pDataY,
uint8_t *pDataUV, uint32_t strideY, uint32_t strideUV);
mfxStatus Drain();
int GetFreeTaskIndex(Task* pTaskPool, mfxU16 nPoolSize);
bool InitParams(qsv_param_t *pParams);
mfxStatus AllocateSurfaces();
mfxStatus GetVideoParam();
mfxStatus InitBitstream();
mfxStatus LoadNV12(mfxFrameSurface1 *pSurface, uint8_t *pDataY,
uint8_t *pDataUV, uint32_t strideY,
uint32_t strideUV);
mfxStatus Drain();
int GetFreeTaskIndex(Task *pTaskPool, mfxU16 nPoolSize);
private:
mfxIMPL m_impl;
mfxVersion m_ver;
MFXVideoSession m_session;
mfxFrameAllocator m_mfxAllocator;
mfxVideoParam m_mfxEncParams;
mfxFrameAllocResponse m_mfxResponse;
mfxFrameSurface1** m_pmfxSurfaces;
mfxU16 m_nSurfNum;
MFXVideoENCODE* m_pmfxENC;
mfxU8 m_SPSBuffer[100];
mfxU8 m_PPSBuffer[100];
mfxU16 m_nSPSBufferSize;
mfxU16 m_nPPSBufferSize;
mfxVideoParam m_parameter;
mfxExtCodingOption2 m_co2;
mfxExtCodingOption m_co;
mfxU16 m_nTaskPool;
Task* m_pTaskPool;
int m_nTaskIdx;
int m_nFirstSyncTask;
mfxBitstream m_outBitstream;
bool m_bIsWindows8OrGreater;
bool m_bUseD3D11;
bool m_bD3D9HACK;
static mfxU16 g_numEncodersOpen;
static mfxHDL g_DX_Handle; // we only want one handle for all instances to use;
mfxIMPL m_impl;
mfxVersion m_ver;
MFXVideoSession m_session;
mfxFrameAllocator m_mfxAllocator;
mfxVideoParam m_mfxEncParams;
mfxFrameAllocResponse m_mfxResponse;
mfxFrameSurface1 **m_pmfxSurfaces;
mfxU16 m_nSurfNum;
MFXVideoENCODE *m_pmfxENC;
mfxU8 m_SPSBuffer[100];
mfxU8 m_PPSBuffer[100];
mfxU16 m_nSPSBufferSize;
mfxU16 m_nPPSBufferSize;
mfxVideoParam m_parameter;
mfxExtCodingOption2 m_co2;
mfxExtCodingOption m_co;
mfxU16 m_nTaskPool;
Task *m_pTaskPool;
int m_nTaskIdx;
int m_nFirstSyncTask;
mfxBitstream m_outBitstream;
bool m_bIsWindows8OrGreater;
bool m_bUseD3D11;
bool m_bD3D9HACK;
static mfxU16 g_numEncodersOpen;
static mfxHDL
g_DX_Handle; // we only want one handle for all instances to use;
};


@@ -10,140 +10,132 @@ Copyright(c) 2005-2014 Intel Corporation. All Rights Reserved.
#include "common_directx11.h"
#include<map>
#include <map>
ID3D11Device* g_pD3D11Device;
ID3D11DeviceContext* g_pD3D11Ctx;
IDXGIFactory2* g_pDXGIFactory;
IDXGIAdapter* g_pAdapter;
ID3D11Device *g_pD3D11Device;
ID3D11DeviceContext *g_pD3D11Ctx;
IDXGIFactory2 *g_pDXGIFactory;
IDXGIAdapter *g_pAdapter;
std::map<mfxMemId*, mfxHDL> allocResponses;
std::map<mfxMemId *, mfxHDL> allocResponses;
std::map<mfxHDL, mfxFrameAllocResponse> allocDecodeResponses;
std::map<mfxHDL, int> allocDecodeRefCount;
std::map<mfxHDL, int> allocDecodeRefCount;
typedef struct {
mfxMemId memId;
mfxMemId memIdStage;
mfxU16 rw;
mfxMemId memId;
mfxMemId memIdStage;
mfxU16 rw;
} CustomMemId;
const struct {
mfxIMPL impl; // actual implementation
mfxU32 adapterID; // device adapter number
} implTypes[] = {
{MFX_IMPL_HARDWARE, 0},
{MFX_IMPL_HARDWARE2, 1},
{MFX_IMPL_HARDWARE3, 2},
{MFX_IMPL_HARDWARE4, 3}
};
mfxIMPL impl; // actual implementation
mfxU32 adapterID; // device adapter number
} implTypes[] = {{MFX_IMPL_HARDWARE, 0},
{MFX_IMPL_HARDWARE2, 1},
{MFX_IMPL_HARDWARE3, 2},
{MFX_IMPL_HARDWARE4, 3}};
// =================================================================
// DirectX functionality required to manage DX11 device and surfaces
//
IDXGIAdapter* GetIntelDeviceAdapterHandle(mfxSession session)
IDXGIAdapter *GetIntelDeviceAdapterHandle(mfxSession session)
{
mfxU32 adapterNum = 0;
mfxIMPL impl;
mfxU32 adapterNum = 0;
mfxIMPL impl;
MFXQueryIMPL(session, &impl);
MFXQueryIMPL(session, &impl);
mfxIMPL baseImpl = MFX_IMPL_BASETYPE(impl); // Extract Media SDK base implementation type
mfxIMPL baseImpl = MFX_IMPL_BASETYPE(
impl); // Extract Media SDK base implementation type
// get corresponding adapter number
for (mfxU8 i = 0; i < sizeof(implTypes)/sizeof(implTypes[0]); i++) {
if (implTypes[i].impl == baseImpl) {
adapterNum = implTypes[i].adapterID;
break;
}
}
// get corresponding adapter number
for (mfxU8 i = 0; i < sizeof(implTypes) / sizeof(implTypes[0]); i++) {
if (implTypes[i].impl == baseImpl) {
adapterNum = implTypes[i].adapterID;
break;
}
}
HRESULT hres = CreateDXGIFactory(__uuidof(IDXGIFactory2), (void**)(&g_pDXGIFactory) );
if (FAILED(hres)) return NULL;
HRESULT hres = CreateDXGIFactory(__uuidof(IDXGIFactory2),
(void **)(&g_pDXGIFactory));
if (FAILED(hres))
return NULL;
IDXGIAdapter* adapter;
hres = g_pDXGIFactory->EnumAdapters(adapterNum, &adapter);
if (FAILED(hres)) return NULL;
IDXGIAdapter *adapter;
hres = g_pDXGIFactory->EnumAdapters(adapterNum, &adapter);
if (FAILED(hres))
return NULL;
return adapter;
return adapter;
}
// Create HW device context
mfxStatus CreateHWDevice(mfxSession session, mfxHDL* deviceHandle, HWND hWnd, bool bCreateSharedHandles)
mfxStatus CreateHWDevice(mfxSession session, mfxHDL *deviceHandle, HWND hWnd,
bool bCreateSharedHandles)
{
//Note: not using bCreateSharedHandles for DX11 -- for API consistency only
hWnd; // Window handle not required by DX11 since we do not showcase rendering.
bCreateSharedHandles; // For rendering, not used here. Just for consistencies sake.
//Note: not using bCreateSharedHandles for DX11 -- for API consistency only
hWnd; // Window handle not required by DX11 since we do not showcase rendering.
bCreateSharedHandles; // For rendering, not used here. Just for consistencies sake.
HRESULT hres = S_OK;
HRESULT hres = S_OK;
static D3D_FEATURE_LEVEL FeatureLevels[] = {
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0
};
D3D_FEATURE_LEVEL pFeatureLevelsOut;
static D3D_FEATURE_LEVEL FeatureLevels[] = {D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0};
D3D_FEATURE_LEVEL pFeatureLevelsOut;
g_pAdapter = GetIntelDeviceAdapterHandle(session);
if (NULL == g_pAdapter)
return MFX_ERR_DEVICE_FAILED;
g_pAdapter = GetIntelDeviceAdapterHandle(session);
if (NULL == g_pAdapter)
return MFX_ERR_DEVICE_FAILED;
UINT dxFlags = 0;
//UINT dxFlags = D3D11_CREATE_DEVICE_DEBUG;
UINT dxFlags = 0;
//UINT dxFlags = D3D11_CREATE_DEVICE_DEBUG;
hres = D3D11CreateDevice( g_pAdapter,
D3D_DRIVER_TYPE_UNKNOWN,
NULL,
dxFlags,
FeatureLevels,
(sizeof(FeatureLevels) / sizeof(FeatureLevels[0])),
D3D11_SDK_VERSION,
&g_pD3D11Device,
&pFeatureLevelsOut,
&g_pD3D11Ctx);
if (FAILED(hres))
return MFX_ERR_DEVICE_FAILED;
hres = D3D11CreateDevice(
g_pAdapter, D3D_DRIVER_TYPE_UNKNOWN, NULL, dxFlags,
FeatureLevels,
(sizeof(FeatureLevels) / sizeof(FeatureLevels[0])),
D3D11_SDK_VERSION, &g_pD3D11Device, &pFeatureLevelsOut,
&g_pD3D11Ctx);
if (FAILED(hres))
return MFX_ERR_DEVICE_FAILED;
// turn on multithreading for the DX11 context
CComQIPtr<ID3D10Multithread> p_mt(g_pD3D11Ctx);
if (p_mt)
p_mt->SetMultithreadProtected(true);
else
return MFX_ERR_DEVICE_FAILED;
// turn on multithreading for the DX11 context
CComQIPtr<ID3D10Multithread> p_mt(g_pD3D11Ctx);
if (p_mt)
p_mt->SetMultithreadProtected(true);
else
return MFX_ERR_DEVICE_FAILED;
*deviceHandle = (mfxHDL)g_pD3D11Device;
*deviceHandle = (mfxHDL)g_pD3D11Device;
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
void SetHWDeviceContext(CComPtr<ID3D11DeviceContext> devCtx)
{
g_pD3D11Ctx = devCtx;
devCtx->GetDevice(&g_pD3D11Device);
g_pD3D11Ctx = devCtx;
devCtx->GetDevice(&g_pD3D11Device);
}
// Free HW device context
void CleanupHWDevice()
{
if (g_pAdapter)
{
if (g_pAdapter) {
g_pAdapter->Release();
g_pAdapter = NULL;
}
if (g_pD3D11Device)
{
if (g_pD3D11Device) {
g_pD3D11Device->Release();
g_pD3D11Device = NULL;
}
if (g_pD3D11Ctx)
{
if (g_pD3D11Ctx) {
g_pD3D11Ctx->Release();
g_pD3D11Ctx = NULL;
}
if (g_pDXGIFactory)
{
if (g_pDXGIFactory) {
g_pDXGIFactory->Release();
g_pDXGIFactory = NULL;
}
@@ -151,7 +143,7 @@ void CleanupHWDevice()
CComPtr<ID3D11DeviceContext> GetHWDeviceContext()
{
return g_pD3D11Ctx;
return g_pD3D11Ctx;
}
/* (Hugh) Functions currently unused */
@@ -170,318 +162,337 @@ void ClearRGBSurfaceD3D(mfxMemId memId)
//
// Intel Media SDK memory allocator entrypoints....
//
mfxStatus _simple_alloc(mfxFrameAllocRequest* request, mfxFrameAllocResponse* response)
mfxStatus _simple_alloc(mfxFrameAllocRequest *request,
mfxFrameAllocResponse *response)
{
HRESULT hRes;
HRESULT hRes;
// Determine surface format
DXGI_FORMAT format;
if (MFX_FOURCC_NV12 == request->Info.FourCC)
format = DXGI_FORMAT_NV12;
else if (MFX_FOURCC_RGB4 == request->Info.FourCC)
format = DXGI_FORMAT_B8G8R8A8_UNORM;
else if (MFX_FOURCC_YUY2== request->Info.FourCC)
format = DXGI_FORMAT_YUY2;
else if (MFX_FOURCC_P8 == request->Info.FourCC ) //|| MFX_FOURCC_P8_TEXTURE == request->Info.FourCC
format = DXGI_FORMAT_P8;
else
format = DXGI_FORMAT_UNKNOWN;
// Determine surface format
DXGI_FORMAT format;
if (MFX_FOURCC_NV12 == request->Info.FourCC)
format = DXGI_FORMAT_NV12;
else if (MFX_FOURCC_RGB4 == request->Info.FourCC)
format = DXGI_FORMAT_B8G8R8A8_UNORM;
else if (MFX_FOURCC_YUY2 == request->Info.FourCC)
format = DXGI_FORMAT_YUY2;
else if (MFX_FOURCC_P8 ==
request->Info
.FourCC) //|| MFX_FOURCC_P8_TEXTURE == request->Info.FourCC
format = DXGI_FORMAT_P8;
else
format = DXGI_FORMAT_UNKNOWN;
if (DXGI_FORMAT_UNKNOWN == format)
return MFX_ERR_UNSUPPORTED;
if (DXGI_FORMAT_UNKNOWN == format)
return MFX_ERR_UNSUPPORTED;
// Allocate custom container to keep texture and stage buffers for each surface
// Container also stores the intended read and/or write operation.
CustomMemId **mids = (CustomMemId **)calloc(request->NumFrameSuggested,
sizeof(CustomMemId *));
if (!mids)
return MFX_ERR_MEMORY_ALLOC;
// Allocate custom container to keep texture and stage buffers for each surface
// Container also stores the intended read and/or write operation.
CustomMemId** mids = (CustomMemId**)calloc(request->NumFrameSuggested, sizeof(CustomMemId*));
if (!mids) return MFX_ERR_MEMORY_ALLOC;
for (int i = 0; i < request->NumFrameSuggested; i++) {
mids[i] = (CustomMemId *)calloc(1, sizeof(CustomMemId));
if (!mids[i]) {
return MFX_ERR_MEMORY_ALLOC;
}
mids[i]->rw = request->Type &
0xF000; // Set intended read/write operation
}
for (int i=0; i<request->NumFrameSuggested; i++) {
mids[i] = (CustomMemId*)calloc(1, sizeof(CustomMemId));
if (!mids[i]) {
return MFX_ERR_MEMORY_ALLOC;
}
mids[i]->rw = request->Type & 0xF000; // Set intended read/write operation
}
request->Type = request->Type & 0x0FFF;
request->Type = request->Type & 0x0FFF;
// because P8 data (bitstream) for h264 encoder should be allocated by CreateBuffer()
// but P8 data (MBData) for MPEG2 encoder should be allocated by CreateTexture2D()
if (request->Info.FourCC == MFX_FOURCC_P8) {
D3D11_BUFFER_DESC desc = {0};
// because P8 data (bitstream) for h264 encoder should be allocated by CreateBuffer()
// but P8 data (MBData) for MPEG2 encoder should be allocated by CreateTexture2D()
if (request->Info.FourCC == MFX_FOURCC_P8) {
D3D11_BUFFER_DESC desc = { 0 };
if (!request->NumFrameSuggested)
return MFX_ERR_MEMORY_ALLOC;
if (!request->NumFrameSuggested) return MFX_ERR_MEMORY_ALLOC;
desc.ByteWidth = request->Info.Width * request->Info.Height;
desc.Usage = D3D11_USAGE_STAGING;
desc.BindFlags = 0;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
desc.MiscFlags = 0;
desc.StructureByteStride = 0;
desc.ByteWidth = request->Info.Width * request->Info.Height;
desc.Usage = D3D11_USAGE_STAGING;
desc.BindFlags = 0;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
desc.MiscFlags = 0;
desc.StructureByteStride = 0;
ID3D11Buffer *buffer = 0;
hRes = g_pD3D11Device->CreateBuffer(&desc, 0, &buffer);
if (FAILED(hRes))
return MFX_ERR_MEMORY_ALLOC;
ID3D11Buffer* buffer = 0;
hRes = g_pD3D11Device->CreateBuffer(&desc, 0, &buffer);
if (FAILED(hRes))
return MFX_ERR_MEMORY_ALLOC;
mids[0]->memId = reinterpret_cast<ID3D11Texture2D *>(buffer);
} else {
D3D11_TEXTURE2D_DESC desc = {0};
mids[0]->memId = reinterpret_cast<ID3D11Texture2D*>(buffer);
} else {
D3D11_TEXTURE2D_DESC desc = {0};
desc.Width = request->Info.Width;
desc.Height = request->Info.Height;
desc.MipLevels = 1;
desc.ArraySize = 1; // number of subresources is 1 in this case
desc.Format = format;
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_DECODER;
desc.MiscFlags = 0;
//desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
desc.Width = request->Info.Width;
desc.Height = request->Info.Height;
desc.MipLevels = 1;
desc.ArraySize = 1; // number of subresources is 1 in this case
desc.Format = format;
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_DECODER;
desc.MiscFlags = 0;
//desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
if ((MFX_MEMTYPE_FROM_VPPIN & request->Type) &&
(DXGI_FORMAT_B8G8R8A8_UNORM == desc.Format)) {
desc.BindFlags = D3D11_BIND_RENDER_TARGET;
if (desc.ArraySize > 2)
return MFX_ERR_MEMORY_ALLOC;
}
if ( (MFX_MEMTYPE_FROM_VPPIN & request->Type) &&
(DXGI_FORMAT_B8G8R8A8_UNORM == desc.Format) ) {
desc.BindFlags = D3D11_BIND_RENDER_TARGET;
if (desc.ArraySize > 2)
return MFX_ERR_MEMORY_ALLOC;
}
if ((MFX_MEMTYPE_FROM_VPPOUT & request->Type) ||
(MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET &
request->Type)) {
desc.BindFlags = D3D11_BIND_RENDER_TARGET;
if (desc.ArraySize > 2)
return MFX_ERR_MEMORY_ALLOC;
}
if ( (MFX_MEMTYPE_FROM_VPPOUT & request->Type) ||
(MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET & request->Type)) {
desc.BindFlags = D3D11_BIND_RENDER_TARGET;
if (desc.ArraySize > 2)
return MFX_ERR_MEMORY_ALLOC;
}
if (DXGI_FORMAT_P8 == desc.Format)
desc.BindFlags = 0;
if ( DXGI_FORMAT_P8 == desc.Format )
desc.BindFlags = 0;
ID3D11Texture2D *pTexture2D;
ID3D11Texture2D* pTexture2D;
// Create surface textures
for (size_t i = 0;
i < request->NumFrameSuggested / desc.ArraySize; i++) {
hRes = g_pD3D11Device->CreateTexture2D(&desc, NULL,
&pTexture2D);
// Create surface textures
for (size_t i = 0; i < request->NumFrameSuggested / desc.ArraySize; i++) {
hRes = g_pD3D11Device->CreateTexture2D(&desc, NULL, &pTexture2D);
if (FAILED(hRes))
return MFX_ERR_MEMORY_ALLOC;
if (FAILED(hRes))
return MFX_ERR_MEMORY_ALLOC;
mids[i]->memId = pTexture2D;
}
mids[i]->memId = pTexture2D;
}
desc.ArraySize = 1;
desc.Usage = D3D11_USAGE_STAGING;
desc.CPUAccessFlags =
D3D11_CPU_ACCESS_READ; // | D3D11_CPU_ACCESS_WRITE;
desc.BindFlags = 0;
desc.MiscFlags = 0;
//desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
desc.ArraySize = 1;
desc.Usage = D3D11_USAGE_STAGING;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;// | D3D11_CPU_ACCESS_WRITE;
desc.BindFlags = 0;
desc.MiscFlags = 0;
//desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
// Create surface staging textures
for (size_t i = 0; i < request->NumFrameSuggested; i++) {
hRes = g_pD3D11Device->CreateTexture2D(&desc, NULL,
&pTexture2D);
// Create surface staging textures
for (size_t i = 0; i < request->NumFrameSuggested; i++) {
hRes = g_pD3D11Device->CreateTexture2D(&desc, NULL, &pTexture2D);
if (FAILED(hRes))
return MFX_ERR_MEMORY_ALLOC;
if (FAILED(hRes))
return MFX_ERR_MEMORY_ALLOC;
mids[i]->memIdStage = pTexture2D;
}
}
mids[i]->memIdStage = pTexture2D;
}
}
response->mids = (mfxMemId *)mids;
response->NumFrameActual = request->NumFrameSuggested;
response->mids = (mfxMemId*)mids;
response->NumFrameActual = request->NumFrameSuggested;
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus simple_alloc(mfxHDL pthis, mfxFrameAllocRequest* request, mfxFrameAllocResponse* response)
mfxStatus simple_alloc(mfxHDL pthis, mfxFrameAllocRequest *request,
mfxFrameAllocResponse *response)
{
mfxStatus sts = MFX_ERR_NONE;
mfxStatus sts = MFX_ERR_NONE;
if (request->Type & MFX_MEMTYPE_SYSTEM_MEMORY)
return MFX_ERR_UNSUPPORTED;
if (request->Type & MFX_MEMTYPE_SYSTEM_MEMORY)
return MFX_ERR_UNSUPPORTED;
if (allocDecodeResponses.find(pthis) != allocDecodeResponses.end() &&
MFX_MEMTYPE_EXTERNAL_FRAME & request->Type &&
MFX_MEMTYPE_FROM_DECODE & request->Type) {
// Memory for this request was already allocated during manual allocation stage. Return saved response
// When decode acceleration device (DXVA) is created it requires a list of d3d surfaces to be passed.
// Therefore Media SDK will ask for the surface info/mids again at Init() stage, thus requiring us to return the saved response
// (No such restriction applies to Encode or VPP)
*response = allocDecodeResponses[pthis];
allocDecodeRefCount[pthis]++;
} else {
sts = _simple_alloc(request, response);
if (allocDecodeResponses.find(pthis) != allocDecodeResponses.end() &&
MFX_MEMTYPE_EXTERNAL_FRAME & request->Type &&
MFX_MEMTYPE_FROM_DECODE & request->Type) {
// Memory for this request was already allocated during manual allocation stage. Return saved response
// When decode acceleration device (DXVA) is created it requires a list of d3d surfaces to be passed.
// Therefore Media SDK will ask for the surface info/mids again at Init() stage, thus requiring us to return the saved response
// (No such restriction applies to Encode or VPP)
*response = allocDecodeResponses[pthis];
allocDecodeRefCount[pthis]++;
} else {
sts = _simple_alloc(request, response);
if (MFX_ERR_NONE == sts) {
if ( MFX_MEMTYPE_EXTERNAL_FRAME & request->Type &&
MFX_MEMTYPE_FROM_DECODE & request->Type) {
// Decode alloc response handling
allocDecodeResponses[pthis] = *response;
allocDecodeRefCount[pthis]++;
} else {
// Encode and VPP alloc response handling
allocResponses[response->mids] = pthis;
}
}
}
if (MFX_ERR_NONE == sts) {
if (MFX_MEMTYPE_EXTERNAL_FRAME & request->Type &&
MFX_MEMTYPE_FROM_DECODE & request->Type) {
// Decode alloc response handling
allocDecodeResponses[pthis] = *response;
allocDecodeRefCount[pthis]++;
} else {
// Encode and VPP alloc response handling
allocResponses[response->mids] = pthis;
}
}
}
return sts;
return sts;
}
mfxStatus simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr)
mfxStatus simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
pthis; // To suppress warning for this unused parameter
pthis; // To suppress warning for this unused parameter
HRESULT hRes = S_OK;
HRESULT hRes = S_OK;
D3D11_TEXTURE2D_DESC desc = {0};
D3D11_MAPPED_SUBRESOURCE lockedRect = {0};
D3D11_TEXTURE2D_DESC desc = {0};
D3D11_MAPPED_SUBRESOURCE lockedRect = {0};
CustomMemId* memId = (CustomMemId*)mid;
ID3D11Texture2D* pSurface = (ID3D11Texture2D*)memId->memId;
ID3D11Texture2D* pStage = (ID3D11Texture2D*)memId->memIdStage;
CustomMemId *memId = (CustomMemId *)mid;
ID3D11Texture2D *pSurface = (ID3D11Texture2D *)memId->memId;
ID3D11Texture2D *pStage = (ID3D11Texture2D *)memId->memIdStage;
D3D11_MAP mapType = D3D11_MAP_READ;
UINT mapFlags = D3D11_MAP_FLAG_DO_NOT_WAIT;
D3D11_MAP mapType = D3D11_MAP_READ;
UINT mapFlags = D3D11_MAP_FLAG_DO_NOT_WAIT;
if (NULL == pStage) {
hRes = g_pD3D11Ctx->Map(pSurface, 0, mapType, mapFlags, &lockedRect);
desc.Format = DXGI_FORMAT_P8;
} else {
pSurface->GetDesc(&desc);
if (NULL == pStage) {
hRes = g_pD3D11Ctx->Map(pSurface, 0, mapType, mapFlags,
&lockedRect);
desc.Format = DXGI_FORMAT_P8;
} else {
pSurface->GetDesc(&desc);
// copy data only in case of user wants to read from stored surface
if (memId->rw & WILL_READ)
g_pD3D11Ctx->CopySubresourceRegion(pStage, 0, 0, 0, 0, pSurface, 0, NULL);
// copy data only in case of user wants to read from stored surface
if (memId->rw & WILL_READ)
g_pD3D11Ctx->CopySubresourceRegion(pStage, 0, 0, 0, 0,
pSurface, 0, NULL);
do {
hRes = g_pD3D11Ctx->Map(pStage, 0, mapType, mapFlags, &lockedRect);
if (S_OK != hRes && DXGI_ERROR_WAS_STILL_DRAWING != hRes)
return MFX_ERR_LOCK_MEMORY;
} while (DXGI_ERROR_WAS_STILL_DRAWING == hRes);
}
do {
hRes = g_pD3D11Ctx->Map(pStage, 0, mapType, mapFlags,
&lockedRect);
if (S_OK != hRes &&
DXGI_ERROR_WAS_STILL_DRAWING != hRes)
return MFX_ERR_LOCK_MEMORY;
} while (DXGI_ERROR_WAS_STILL_DRAWING == hRes);
}
if (FAILED(hRes))
return MFX_ERR_LOCK_MEMORY;
if (FAILED(hRes))
return MFX_ERR_LOCK_MEMORY;
switch (desc.Format) {
case DXGI_FORMAT_NV12:
ptr->Pitch = (mfxU16)lockedRect.RowPitch;
ptr->Y = (mfxU8*)lockedRect.pData;
ptr->U = (mfxU8*)lockedRect.pData + desc.Height * lockedRect.RowPitch;
ptr->V = ptr->U + 1;
break;
case DXGI_FORMAT_B8G8R8A8_UNORM :
ptr->Pitch = (mfxU16)lockedRect.RowPitch;
ptr->B = (mfxU8*)lockedRect.pData;
ptr->G = ptr->B + 1;
ptr->R = ptr->B + 2;
ptr->A = ptr->B + 3;
break;
case DXGI_FORMAT_YUY2:
ptr->Pitch = (mfxU16)lockedRect.RowPitch;
ptr->Y = (mfxU8*)lockedRect.pData;
ptr->U = ptr->Y + 1;
ptr->V = ptr->Y + 3;
break;
case DXGI_FORMAT_P8 :
ptr->Pitch = (mfxU16)lockedRect.RowPitch;
ptr->Y = (mfxU8*)lockedRect.pData;
ptr->U = 0;
ptr->V = 0;
break;
default:
return MFX_ERR_LOCK_MEMORY;
}
switch (desc.Format) {
case DXGI_FORMAT_NV12:
ptr->Pitch = (mfxU16)lockedRect.RowPitch;
ptr->Y = (mfxU8 *)lockedRect.pData;
ptr->U = (mfxU8 *)lockedRect.pData +
desc.Height * lockedRect.RowPitch;
ptr->V = ptr->U + 1;
break;
case DXGI_FORMAT_B8G8R8A8_UNORM:
ptr->Pitch = (mfxU16)lockedRect.RowPitch;
ptr->B = (mfxU8 *)lockedRect.pData;
ptr->G = ptr->B + 1;
ptr->R = ptr->B + 2;
ptr->A = ptr->B + 3;
break;
case DXGI_FORMAT_YUY2:
ptr->Pitch = (mfxU16)lockedRect.RowPitch;
ptr->Y = (mfxU8 *)lockedRect.pData;
ptr->U = ptr->Y + 1;
ptr->V = ptr->Y + 3;
break;
case DXGI_FORMAT_P8:
ptr->Pitch = (mfxU16)lockedRect.RowPitch;
ptr->Y = (mfxU8 *)lockedRect.pData;
ptr->U = 0;
ptr->V = 0;
break;
default:
return MFX_ERR_LOCK_MEMORY;
}
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus simple_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr)
mfxStatus simple_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
pthis; // To suppress warning for this unused parameter
pthis; // To suppress warning for this unused parameter
CustomMemId* memId = (CustomMemId*)mid;
ID3D11Texture2D* pSurface = (ID3D11Texture2D*)memId->memId;
ID3D11Texture2D* pStage = (ID3D11Texture2D*)memId->memIdStage;
CustomMemId *memId = (CustomMemId *)mid;
ID3D11Texture2D *pSurface = (ID3D11Texture2D *)memId->memId;
ID3D11Texture2D *pStage = (ID3D11Texture2D *)memId->memIdStage;
if (NULL == pStage) {
g_pD3D11Ctx->Unmap(pSurface, 0);
} else {
g_pD3D11Ctx->Unmap(pStage, 0);
// copy data only in case of user wants to write to stored surface
if (memId->rw & WILL_WRITE)
g_pD3D11Ctx->CopySubresourceRegion(pSurface, 0, 0, 0, 0, pStage, 0, NULL);
}
if (NULL == pStage) {
g_pD3D11Ctx->Unmap(pSurface, 0);
} else {
g_pD3D11Ctx->Unmap(pStage, 0);
// copy data only in case of user wants to write to stored surface
if (memId->rw & WILL_WRITE)
g_pD3D11Ctx->CopySubresourceRegion(pSurface, 0, 0, 0, 0,
pStage, 0, NULL);
}
if (ptr) {
ptr->Pitch=0;
ptr->U=ptr->V=ptr->Y=0;
ptr->A=ptr->R=ptr->G=ptr->B=0;
}
if (ptr) {
ptr->Pitch = 0;
ptr->U = ptr->V = ptr->Y = 0;
ptr->A = ptr->R = ptr->G = ptr->B = 0;
}
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus simple_gethdl(mfxHDL pthis, mfxMemId mid, mfxHDL* handle)
mfxStatus simple_gethdl(mfxHDL pthis, mfxMemId mid, mfxHDL *handle)
{
pthis; // To suppress warning for this unused parameter
pthis; // To suppress warning for this unused parameter
if (NULL == handle)
return MFX_ERR_INVALID_HANDLE;
if (NULL == handle)
return MFX_ERR_INVALID_HANDLE;
mfxHDLPair* pPair = (mfxHDLPair*)handle;
CustomMemId* memId = (CustomMemId*)mid;
mfxHDLPair *pPair = (mfxHDLPair *)handle;
CustomMemId *memId = (CustomMemId *)mid;
pPair->first = memId->memId; // surface texture
pPair->second = 0;
pPair->first = memId->memId; // surface texture
pPair->second = 0;
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus _simple_free(mfxFrameAllocResponse* response)
mfxStatus _simple_free(mfxFrameAllocResponse *response)
{
if (response->mids) {
for (mfxU32 i = 0; i < response->NumFrameActual; i++) {
if (response->mids[i]) {
CustomMemId* mid = (CustomMemId*)response->mids[i];
ID3D11Texture2D* pSurface = (ID3D11Texture2D*)mid->memId;
ID3D11Texture2D* pStage = (ID3D11Texture2D*)mid->memIdStage;
if (response->mids) {
for (mfxU32 i = 0; i < response->NumFrameActual; i++) {
if (response->mids[i]) {
CustomMemId *mid =
(CustomMemId *)response->mids[i];
ID3D11Texture2D *pSurface =
(ID3D11Texture2D *)mid->memId;
ID3D11Texture2D *pStage =
(ID3D11Texture2D *)mid->memIdStage;
if (pSurface)
pSurface->Release();
if (pStage)
pStage->Release();
if (pSurface)
pSurface->Release();
if (pStage)
pStage->Release();
free(mid);
}
}
free(response->mids);
response->mids = NULL;
}
free(mid);
}
}
free(response->mids);
response->mids = NULL;
}
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus simple_free(mfxHDL pthis, mfxFrameAllocResponse* response)
mfxStatus simple_free(mfxHDL pthis, mfxFrameAllocResponse *response)
{
if (NULL == response)
return MFX_ERR_NULL_PTR;
if (NULL == response)
return MFX_ERR_NULL_PTR;
if (allocResponses.find(response->mids) == allocResponses.end()) {
// Decode free response handling
if (--allocDecodeRefCount[pthis] == 0) {
_simple_free(response);
allocDecodeResponses.erase(pthis);
allocDecodeRefCount.erase(pthis);
}
} else {
// Encode and VPP free response handling
allocResponses.erase(response->mids);
_simple_free(response);
}
if (allocResponses.find(response->mids) == allocResponses.end()) {
// Decode free response handling
if (--allocDecodeRefCount[pthis] == 0) {
_simple_free(response);
allocDecodeResponses.erase(pthis);
allocDecodeRefCount.erase(pthis);
}
} else {
// Encode and VPP free response handling
allocResponses.erase(response->mids);
_simple_free(response);
}
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}


@@ -31,7 +31,8 @@ Copyright(c) 2005-2014 Intel Corporation. All Rights Reserved.
// - Device must be active (but monitor does NOT have to be attached)
// - Device must be enabled in BIOS. Required for the case when used together with a discrete graphics card
// - For switchable graphics solutions (mobile) make sure that Intel device is the active device
mfxStatus CreateHWDevice(mfxSession session, mfxHDL* deviceHandle, HWND hWnd, bool bCreateSharedHandles);
mfxStatus CreateHWDevice(mfxSession session, mfxHDL *deviceHandle, HWND hWnd,
bool bCreateSharedHandles);
void CleanupHWDevice();
void SetHWDeviceContext(CComPtr<ID3D11DeviceContext> devCtx);
CComPtr<ID3D11DeviceContext> GetHWDeviceContext();


@@ -7,15 +7,20 @@
#include <map>
#include <atlbase.h>
#define D3DFMT_NV12 (D3DFORMAT) MAKEFOURCC('N', 'V', '1', '2')
#define D3DFMT_YV12 (D3DFORMAT) MAKEFOURCC('Y', 'V', '1', '2')
#define D3DFMT_P010 (D3DFORMAT) MAKEFOURCC('P', '0', '1', '0')
#define MSDK_SAFE_FREE(X) \
{ \
if (X) { \
free(X); \
X = NULL; \
} \
}
#define D3DFMT_NV12 (D3DFORMAT)MAKEFOURCC('N','V','1','2')
#define D3DFMT_YV12 (D3DFORMAT)MAKEFOURCC('Y','V','1','2')
#define D3DFMT_P010 (D3DFORMAT)MAKEFOURCC('P','0','1','0')
#define MSDK_SAFE_FREE(X) {if (X) { free(X); X = NULL; }}
std::map<mfxMemId*, mfxHDL> dx9_allocResponses;
std::map<mfxMemId *, mfxHDL> dx9_allocResponses;
std::map<mfxHDL, mfxFrameAllocResponse> dx9_allocDecodeResponses;
std::map<mfxHDL, int> dx9_allocDecodeRefCount;
std::map<mfxHDL, int> dx9_allocDecodeRefCount;
CComPtr<IDirect3DDeviceManager9> m_manager;
CComPtr<IDirectXVideoDecoderService> m_decoderService;
@@ -24,61 +29,55 @@ HANDLE m_hDecoder;
HANDLE m_hProcessor;
DWORD m_surfaceUsage;
CD3D9Device* g_hwdevice;
CD3D9Device *g_hwdevice;
const struct {
mfxIMPL impl; // actual implementation
mfxU32 adapterID; // device adapter number
} implTypes[] = {
{ MFX_IMPL_HARDWARE, 0 },
{ MFX_IMPL_HARDWARE2, 1 },
{ MFX_IMPL_HARDWARE3, 2 },
{ MFX_IMPL_HARDWARE4, 3 }
mfxIMPL impl; // actual implementation
mfxU32 adapterID; // device adapter number
} implTypes[] = {{MFX_IMPL_HARDWARE, 0},
{MFX_IMPL_HARDWARE2, 1},
{MFX_IMPL_HARDWARE3, 2},
{MFX_IMPL_HARDWARE4, 3}};
struct mfxAllocatorParams {
virtual ~mfxAllocatorParams(){};
};
struct mfxAllocatorParams
{
virtual ~mfxAllocatorParams(){};
};
struct D3DAllocatorParams : mfxAllocatorParams {
IDirect3DDeviceManager9 *pManager;
DWORD surfaceUsage;
struct D3DAllocatorParams : mfxAllocatorParams
{
IDirect3DDeviceManager9 *pManager;
DWORD surfaceUsage;
D3DAllocatorParams()
: pManager()
, surfaceUsage()
{
}
D3DAllocatorParams() : pManager(), surfaceUsage() {}
};
mfxStatus DX9_Alloc_Init(D3DAllocatorParams *pParams)
{
D3DAllocatorParams *pd3dParams = 0;
pd3dParams = dynamic_cast<D3DAllocatorParams *>(pParams);
if (!pd3dParams)
return MFX_ERR_NOT_INITIALIZED;
D3DAllocatorParams *pd3dParams = 0;
pd3dParams = dynamic_cast<D3DAllocatorParams *>(pParams);
if (!pd3dParams)
return MFX_ERR_NOT_INITIALIZED;
m_manager = pd3dParams->pManager;
m_surfaceUsage = pd3dParams->surfaceUsage;
m_manager = pd3dParams->pManager;
m_surfaceUsage = pd3dParams->surfaceUsage;
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus DX9_CreateHWDevice(mfxSession session, mfxHDL* deviceHandle, HWND, bool)
mfxStatus DX9_CreateHWDevice(mfxSession session, mfxHDL *deviceHandle, HWND,
bool)
{
mfxStatus result;
g_hwdevice = new CD3D9Device;
mfxU32 adapterNum = 0;
mfxU32 adapterNum = 0;
mfxIMPL impl;
MFXQueryIMPL(session, &impl);
mfxIMPL baseImpl = MFX_IMPL_BASETYPE(impl); // Extract Media SDK base implementation type
mfxIMPL baseImpl = MFX_IMPL_BASETYPE(
impl); // Extract Media SDK base implementation type
// get corresponding adapter number
// get corresponding adapter number
for (mfxU8 i = 0; i < sizeof(implTypes) / sizeof(implTypes[0]); i++) {
if (implTypes[i].impl == baseImpl) {
adapterNum = implTypes[i].adapterID;
@@ -86,7 +85,7 @@ mfxStatus DX9_CreateHWDevice(mfxSession session, mfxHDL* deviceHandle, HWND, boo
}
}
POINT point = { 0, 0 };
POINT point = {0, 0};
HWND window = WindowFromPoint(point);
result = g_hwdevice->Init(window, 0, adapterNum);
@@ -96,9 +95,9 @@ mfxStatus DX9_CreateHWDevice(mfxSession session, mfxHDL* deviceHandle, HWND, boo
g_hwdevice->GetHandle(MFX_HANDLE_D3D9_DEVICE_MANAGER, deviceHandle);
D3DAllocatorParams dx9_allocParam;
dx9_allocParam.pManager = reinterpret_cast<IDirect3DDeviceManager9 *>(*deviceHandle);
dx9_allocParam.pManager =
reinterpret_cast<IDirect3DDeviceManager9 *>(*deviceHandle);
DX9_Alloc_Init(&dx9_allocParam);
return MFX_ERR_NONE;
}
@@ -135,8 +134,7 @@ void DX9_CleanupHWDevice()
D3DFORMAT ConvertMfxFourccToD3dFormat(mfxU32 fourcc)
{
switch (fourcc)
{
switch (fourcc) {
case MFX_FOURCC_NV12:
return D3DFMT_NV12;
case MFX_FOURCC_YV12:
@@ -158,15 +156,16 @@ D3DFORMAT ConvertMfxFourccToD3dFormat(mfxU32 fourcc)
}
}
mfxStatus dx9_simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr)
mfxStatus dx9_simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
pthis; // To suppress warning for this unused parameter
if (!ptr || !mid)
return MFX_ERR_NULL_PTR;
mfxHDLPair *dxmid = (mfxHDLPair*)mid;
IDirect3DSurface9 *pSurface = static_cast<IDirect3DSurface9*>(dxmid->first);
mfxHDLPair *dxmid = (mfxHDLPair *)mid;
IDirect3DSurface9 *pSurface =
static_cast<IDirect3DSurface9 *>(dxmid->first);
if (pSurface == 0)
return MFX_ERR_INVALID_HANDLE;
@@ -175,14 +174,10 @@ mfxStatus dx9_simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr)
if (FAILED(hr))
return MFX_ERR_LOCK_MEMORY;
if (desc.Format != D3DFMT_NV12 &&
desc.Format != D3DFMT_YV12 &&
desc.Format != D3DFMT_YUY2 &&
desc.Format != D3DFMT_R8G8B8 &&
desc.Format != D3DFMT_A8R8G8B8 &&
desc.Format != D3DFMT_P8 &&
desc.Format != D3DFMT_P010 &&
desc.Format != D3DFMT_A2R10G10B10)
if (desc.Format != D3DFMT_NV12 && desc.Format != D3DFMT_YV12 &&
desc.Format != D3DFMT_YUY2 && desc.Format != D3DFMT_R8G8B8 &&
desc.Format != D3DFMT_A8R8G8B8 && desc.Format != D3DFMT_P8 &&
desc.Format != D3DFMT_P010 && desc.Format != D3DFMT_A2R10G10B10)
return MFX_ERR_LOCK_MEMORY;
D3DLOCKED_RECT locked;
@@ -191,8 +186,7 @@ mfxStatus dx9_simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr)
if (FAILED(hr))
return MFX_ERR_LOCK_MEMORY;
switch ((DWORD)desc.Format)
{
switch ((DWORD)desc.Format) {
case D3DFMT_NV12:
ptr->Pitch = (mfxU16)locked.Pitch;
ptr->Y = (mfxU8 *)locked.pBits;
@@ -243,20 +237,20 @@ mfxStatus dx9_simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr)
return MFX_ERR_NONE;
}
mfxStatus dx9_simple_unlock(mfxHDL, mfxMemId mid, mfxFrameData* ptr)
mfxStatus dx9_simple_unlock(mfxHDL, mfxMemId mid, mfxFrameData *ptr)
{
if (!mid)
return MFX_ERR_NULL_PTR;
mfxHDLPair *dxmid = (mfxHDLPair*)mid;
IDirect3DSurface9 *pSurface = static_cast<IDirect3DSurface9*>(dxmid->first);
mfxHDLPair *dxmid = (mfxHDLPair *)mid;
IDirect3DSurface9 *pSurface =
static_cast<IDirect3DSurface9 *>(dxmid->first);
if (pSurface == 0)
return MFX_ERR_INVALID_HANDLE;
pSurface->UnlockRect();
if (NULL != ptr)
{
if (NULL != ptr) {
ptr->Pitch = 0;
ptr->Y = 0;
ptr->U = 0;
@@ -266,17 +260,17 @@ mfxStatus dx9_simple_unlock(mfxHDL, mfxMemId mid, mfxFrameData* ptr)
return MFX_ERR_NONE;
}
mfxStatus dx9_simple_gethdl(mfxHDL, mfxMemId mid, mfxHDL* handle)
mfxStatus dx9_simple_gethdl(mfxHDL, mfxMemId mid, mfxHDL *handle)
{
if (!mid || !handle)
return MFX_ERR_NULL_PTR;
mfxHDLPair *dxMid = (mfxHDLPair*)mid;
mfxHDLPair *dxMid = (mfxHDLPair *)mid;
*handle = dxMid->first;
return MFX_ERR_NONE;
}
mfxStatus _dx9_simple_free(mfxFrameAllocResponse* response)
mfxStatus _dx9_simple_free(mfxFrameAllocResponse *response)
{
if (!response)
return MFX_ERR_NULL_PTR;
@@ -286,8 +280,10 @@ mfxStatus _dx9_simple_free(mfxFrameAllocResponse* response)
if (response->mids) {
for (mfxU32 i = 0; i < response->NumFrameActual; i++) {
if (response->mids[i]) {
mfxHDLPair *dxMids = (mfxHDLPair*)response->mids[i];
static_cast<IDirect3DSurface9*>(dxMids->first)->Release();
mfxHDLPair *dxMids =
(mfxHDLPair *)response->mids[i];
static_cast<IDirect3DSurface9 *>(dxMids->first)
->Release();
}
}
MSDK_SAFE_FREE(response->mids[0]);
@@ -297,28 +293,30 @@ mfxStatus _dx9_simple_free(mfxFrameAllocResponse* response)
return sts;
}
mfxStatus dx9_simple_free(mfxHDL pthis, mfxFrameAllocResponse* response)
mfxStatus dx9_simple_free(mfxHDL pthis, mfxFrameAllocResponse *response)
{
if (NULL == response)
return MFX_ERR_NULL_PTR;
if (NULL == response)
return MFX_ERR_NULL_PTR;
if (dx9_allocResponses.find(response->mids) == dx9_allocResponses.end()) {
// Decode free response handling
if (--dx9_allocDecodeRefCount[pthis] == 0) {
_dx9_simple_free(response);
dx9_allocDecodeResponses.erase(pthis);
dx9_allocDecodeRefCount.erase(pthis);
}
} else {
// Encode and VPP free response handling
dx9_allocResponses.erase(response->mids);
_dx9_simple_free(response);
}
if (dx9_allocResponses.find(response->mids) ==
dx9_allocResponses.end()) {
// Decode free response handling
if (--dx9_allocDecodeRefCount[pthis] == 0) {
_dx9_simple_free(response);
dx9_allocDecodeResponses.erase(pthis);
dx9_allocDecodeRefCount.erase(pthis);
}
} else {
// Encode and VPP free response handling
dx9_allocResponses.erase(response->mids);
_dx9_simple_free(response);
}
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus _dx9_simple_alloc(mfxFrameAllocRequest* request, mfxFrameAllocResponse* response)
mfxStatus _dx9_simple_alloc(mfxFrameAllocRequest *request,
mfxFrameAllocResponse *response)
{
HRESULT hr;
@@ -331,20 +329,16 @@ mfxStatus _dx9_simple_alloc(mfxFrameAllocRequest* request, mfxFrameAllocResponse
if (format == D3DFMT_UNKNOWN)
return MFX_ERR_UNSUPPORTED;
DWORD target;
DWORD target;
if (MFX_MEMTYPE_DXVA2_DECODER_TARGET & request->Type)
{
if (MFX_MEMTYPE_DXVA2_DECODER_TARGET & request->Type) {
target = DXVA2_VideoDecoderRenderTarget;
}
else if (MFX_MEMTYPE_DXVA2_PROCESSOR_TARGET & request->Type)
{
} else if (MFX_MEMTYPE_DXVA2_PROCESSOR_TARGET & request->Type) {
target = DXVA2_VideoProcessorRenderTarget;
}
else
} else
return MFX_ERR_UNSUPPORTED;
IDirectXVideoAccelerationService* videoService = NULL;
IDirectXVideoAccelerationService *videoService = NULL;
if (target == DXVA2_VideoProcessorRenderTarget) {
if (!m_hProcessor) {
@@ -352,20 +346,22 @@ mfxStatus _dx9_simple_alloc(mfxFrameAllocRequest* request, mfxFrameAllocResponse
if (FAILED(hr))
return MFX_ERR_MEMORY_ALLOC;
hr = m_manager->GetVideoService(m_hProcessor, IID_IDirectXVideoProcessorService, (void**)&m_processorService);
hr = m_manager->GetVideoService(
m_hProcessor, IID_IDirectXVideoProcessorService,
(void **)&m_processorService);
if (FAILED(hr))
return MFX_ERR_MEMORY_ALLOC;
}
videoService = m_processorService;
}
else {
if (!m_hDecoder)
{
} else {
if (!m_hDecoder) {
hr = m_manager->OpenDeviceHandle(&m_hDecoder);
if (FAILED(hr))
return MFX_ERR_MEMORY_ALLOC;
hr = m_manager->GetVideoService(m_hDecoder, IID_IDirectXVideoDecoderService, (void**)&m_decoderService);
hr = m_manager->GetVideoService(
m_hDecoder, IID_IDirectXVideoDecoderService,
(void **)&m_decoderService);
if (FAILED(hr))
return MFX_ERR_MEMORY_ALLOC;
}
@@ -373,8 +369,10 @@ mfxStatus _dx9_simple_alloc(mfxFrameAllocRequest* request, mfxFrameAllocResponse
}
mfxHDLPair *dxMids = NULL, **dxMidPtrs = NULL;
dxMids = (mfxHDLPair*)calloc(request->NumFrameSuggested, sizeof(mfxHDLPair));
dxMidPtrs = (mfxHDLPair**)calloc(request->NumFrameSuggested, sizeof(mfxHDLPair*));
dxMids = (mfxHDLPair *)calloc(request->NumFrameSuggested,
sizeof(mfxHDLPair));
dxMidPtrs = (mfxHDLPair **)calloc(request->NumFrameSuggested,
sizeof(mfxHDLPair *));
if (!dxMids || !dxMidPtrs) {
MSDK_SAFE_FREE(dxMids);
@@ -382,13 +380,16 @@ mfxStatus _dx9_simple_alloc(mfxFrameAllocRequest* request, mfxFrameAllocResponse
return MFX_ERR_MEMORY_ALLOC;
}
response->mids = (mfxMemId*)dxMidPtrs;
response->mids = (mfxMemId *)dxMidPtrs;
response->NumFrameActual = request->NumFrameSuggested;
if (request->Type & MFX_MEMTYPE_EXTERNAL_FRAME) {
for (int i = 0; i < request->NumFrameSuggested; i++) {
hr = videoService->CreateSurface(request->Info.Width, request->Info.Height, 0, format,
D3DPOOL_DEFAULT, m_surfaceUsage, target, (IDirect3DSurface9**)&dxMids[i].first, &dxMids[i].second);
hr = videoService->CreateSurface(
request->Info.Width, request->Info.Height, 0,
format, D3DPOOL_DEFAULT, m_surfaceUsage, target,
(IDirect3DSurface9 **)&dxMids[i].first,
&dxMids[i].second);
if (FAILED(hr)) {
_dx9_simple_free(response);
MSDK_SAFE_FREE(dxMids);
@@ -396,23 +397,22 @@ mfxStatus _dx9_simple_alloc(mfxFrameAllocRequest* request, mfxFrameAllocResponse
}
dxMidPtrs[i] = &dxMids[i];
}
}
else {
safe_array<IDirect3DSurface9*> dxSrf(new IDirect3DSurface9*[request->NumFrameSuggested]);
if (!dxSrf.get())
{
} else {
safe_array<IDirect3DSurface9 *> dxSrf(
new IDirect3DSurface9 *[request->NumFrameSuggested]);
if (!dxSrf.get()) {
MSDK_SAFE_FREE(dxMids);
return MFX_ERR_MEMORY_ALLOC;
}
hr = videoService->CreateSurface(request->Info.Width, request->Info.Height, request->NumFrameSuggested - 1, format,
D3DPOOL_DEFAULT, m_surfaceUsage, target, dxSrf.get(), NULL);
if (FAILED(hr))
{
hr = videoService->CreateSurface(
request->Info.Width, request->Info.Height,
request->NumFrameSuggested - 1, format, D3DPOOL_DEFAULT,
m_surfaceUsage, target, dxSrf.get(), NULL);
if (FAILED(hr)) {
MSDK_SAFE_FREE(dxMids);
return MFX_ERR_MEMORY_ALLOC;
}
for (int i = 0; i < request->NumFrameSuggested; i++) {
dxMids[i].first = dxSrf.get()[i];
dxMidPtrs[i] = &dxMids[i];
@@ -421,37 +421,39 @@ mfxStatus _dx9_simple_alloc(mfxFrameAllocRequest* request, mfxFrameAllocResponse
return MFX_ERR_NONE;
}
mfxStatus dx9_simple_alloc(mfxHDL pthis, mfxFrameAllocRequest* request, mfxFrameAllocResponse* response)
mfxStatus dx9_simple_alloc(mfxHDL pthis, mfxFrameAllocRequest *request,
mfxFrameAllocResponse *response)
{
mfxStatus sts = MFX_ERR_NONE;
mfxStatus sts = MFX_ERR_NONE;
if (request->Type & MFX_MEMTYPE_SYSTEM_MEMORY)
return MFX_ERR_UNSUPPORTED;
if (request->Type & MFX_MEMTYPE_SYSTEM_MEMORY)
return MFX_ERR_UNSUPPORTED;
if (dx9_allocDecodeResponses.find(pthis) != dx9_allocDecodeResponses.end() &&
MFX_MEMTYPE_EXTERNAL_FRAME & request->Type &&
MFX_MEMTYPE_FROM_DECODE & request->Type) {
// Memory for this request was already allocated during manual allocation stage. Return saved response
// When decode acceleration device (DXVA) is created it requires a list of d3d surfaces to be passed.
// Therefore Media SDK will ask for the surface info/mids again at Init() stage, thus requiring us to return the saved response
// (No such restriction applies to Encode or VPP)
*response = dx9_allocDecodeResponses[pthis];
dx9_allocDecodeRefCount[pthis]++;
} else {
sts = _dx9_simple_alloc(request, response);
if (dx9_allocDecodeResponses.find(pthis) !=
dx9_allocDecodeResponses.end() &&
MFX_MEMTYPE_EXTERNAL_FRAME & request->Type &&
MFX_MEMTYPE_FROM_DECODE & request->Type) {
// Memory for this request was already allocated during manual allocation stage. Return saved response
// When decode acceleration device (DXVA) is created it requires a list of d3d surfaces to be passed.
// Therefore Media SDK will ask for the surface info/mids again at Init() stage, thus requiring us to return the saved response
// (No such restriction applies to Encode or VPP)
*response = dx9_allocDecodeResponses[pthis];
dx9_allocDecodeRefCount[pthis]++;
} else {
sts = _dx9_simple_alloc(request, response);
if (MFX_ERR_NONE == sts) {
if ( MFX_MEMTYPE_EXTERNAL_FRAME & request->Type &&
MFX_MEMTYPE_FROM_DECODE & request->Type) {
// Decode alloc response handling
dx9_allocDecodeResponses[pthis] = *response;
dx9_allocDecodeRefCount[pthis]++;
} else {
// Encode and VPP alloc response handling
dx9_allocResponses[response->mids] = pthis;
}
}
}
if (MFX_ERR_NONE == sts) {
if (MFX_MEMTYPE_EXTERNAL_FRAME & request->Type &&
MFX_MEMTYPE_FROM_DECODE & request->Type) {
// Decode alloc response handling
dx9_allocDecodeResponses[pthis] = *response;
dx9_allocDecodeRefCount[pthis]++;
} else {
// Encode and VPP alloc response handling
dx9_allocResponses[response->mids] = pthis;
}
}
}
return sts;
return sts;
}
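For context, the decode-response caching described in the comments above can be sketched as a hypothetical call sequence (not part of this commit; pthis and decodeRequest stand for the session handle and an external MFX_MEMTYPE_FROM_DECODE allocation request):

static void illustrate_decode_alloc_caching(mfxHDL pthis,
					    mfxFrameAllocRequest *decodeRequest)
{
	mfxFrameAllocResponse resp1, resp2;
	dx9_simple_alloc(pthis, decodeRequest, &resp1); /* real allocation, refcount -> 1 */
	dx9_simple_alloc(pthis, decodeRequest, &resp2); /* cached response returned, refcount -> 2 */
	dx9_simple_free(pthis, &resp2); /* refcount -> 1, surfaces kept alive */
	dx9_simple_free(pthis, &resp1); /* refcount -> 0, surfaces actually released */
}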



@@ -27,44 +27,42 @@ MFX_HANDLE_GFXS3DCONTROL must be set prior if initializing for 2 views.
@note Device always set D3DPRESENT_PARAMETERS::Windowed to TRUE.
*/
template <class T>
class safe_array
{
public:
safe_array(T *ptr = 0):m_ptr(ptr)
{ // construct from object pointer
};
~safe_array()
{
reset(0);
}
T* get()
{ // return wrapped pointer
return m_ptr;
}
T* release()
{ // return wrapped pointer and give up ownership
T* ptr = m_ptr;
m_ptr = 0;
return ptr;
}
void reset(T* ptr)
{ // destroy designated object and store new pointer
if (m_ptr)
{
delete[] m_ptr;
}
m_ptr = ptr;
}
protected:
T* m_ptr; // the wrapped object pointer
};
template<class T> class safe_array {
public:
safe_array(T *ptr = 0)
: m_ptr(ptr){
// construct from object pointer
};
~safe_array() { reset(0); }
T *get()
{ // return wrapped pointer
return m_ptr;
}
T *release()
{ // return wrapped pointer and give up ownership
T *ptr = m_ptr;
m_ptr = 0;
return ptr;
}
void reset(T *ptr)
{ // destroy designated object and store new pointer
if (m_ptr) {
delete[] m_ptr;
}
m_ptr = ptr;
}
mfxStatus dx9_simple_alloc(mfxHDL pthis, mfxFrameAllocRequest* request, mfxFrameAllocResponse* response);
mfxStatus dx9_simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr);
mfxStatus dx9_simple_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr);
mfxStatus dx9_simple_gethdl(mfxHDL pthis, mfxMemId mid, mfxHDL* handle);
mfxStatus dx9_simple_free(mfxHDL pthis, mfxFrameAllocResponse* response);
protected:
T *m_ptr; // the wrapped object pointer
};
mfxStatus DX9_CreateHWDevice(mfxSession session, mfxHDL* deviceHandle, HWND hWnd, bool bCreateSharedHandles);
mfxStatus dx9_simple_alloc(mfxHDL pthis, mfxFrameAllocRequest *request,
mfxFrameAllocResponse *response);
mfxStatus dx9_simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr);
mfxStatus dx9_simple_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr);
mfxStatus dx9_simple_gethdl(mfxHDL pthis, mfxMemId mid, mfxHDL *handle);
mfxStatus dx9_simple_free(mfxHDL pthis, mfxFrameAllocResponse *response);
mfxStatus DX9_CreateHWDevice(mfxSession session, mfxHDL *deviceHandle,
HWND hWnd, bool bCreateSharedHandles);
void DX9_CleanupHWDevice();
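As a usage note (illustrative only, not from the commit): safe_array is a small RAII wrapper around a heap array, and _dx9_simple_alloc relies on it to release the temporary surface-pointer array on every early-return path. A minimal sketch, assuming the caller supplies numSurfaces:

static mfxStatus example_temp_surface_array(mfxU16 numSurfaces)
{
	safe_array<IDirect3DSurface9 *> surfaces(
		new IDirect3DSurface9 *[numSurfaces]);
	if (!surfaces.get())
		return MFX_ERR_MEMORY_ALLOC;
	/* ... pass surfaces.get() to CreateSurface(), copy the pointers out ... */
	return MFX_ERR_NONE;
	/* ~safe_array() runs here and delete[]s the temporary array */
}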


@@ -14,293 +14,296 @@ Copyright(c) 2005-2014 Intel Corporation. All Rights Reserved.
// Utility functions, not directly tied to Intel Media SDK functionality
//
void PrintErrString(int err,const char* filestr,int line)
void PrintErrString(int err, const char *filestr, int line)
{
switch (err) {
case 0:
printf("\n No error.\n");
break;
case -1:
printf("\n Unknown error: %s %d\n",filestr,line);
break;
case -2:
printf("\n Null pointer. Check filename/path + permissions? %s %d\n",filestr,line);
break;
case -3:
printf("\n Unsupported feature/library load error. %s %d\n",filestr,line);
break;
case -4:
printf("\n Could not allocate memory. %s %d\n",filestr,line);
break;
case -5:
printf("\n Insufficient IO buffers. %s %d\n",filestr,line);
break;
case -6:
printf("\n Invalid handle. %s %d\n",filestr,line);
break;
case -7:
printf("\n Memory lock failure. %s %d\n",filestr,line);
break;
case -8:
printf("\n Function called before initialization. %s %d\n",filestr,line);
break;
case -9:
printf("\n Specified object not found. %s %d\n",filestr,line);
break;
case -10:
printf("\n More input data expected. %s %d\n",filestr,line);
break;
case -11:
printf("\n More output surfaces expected. %s %d\n",filestr,line);
break;
case -12:
printf("\n Operation aborted. %s %d\n",filestr,line);
break;
case -13:
printf("\n HW device lost. %s %d\n",filestr,line);
break;
case -14:
printf("\n Incompatible video parameters. %s %d\n",filestr,line);
break;
case -15:
printf("\n Invalid video parameters. %s %d\n",filestr,line);
break;
case -16:
printf("\n Undefined behavior. %s %d\n",filestr,line);
break;
case -17:
printf("\n Device operation failure. %s %d\n",filestr,line);
break;
case -18:
printf("\n More bitstream data expected. %s %d\n",filestr,line);
break;
case -19:
printf("\n Incompatible audio parameters. %s %d\n",filestr,line);
break;
case -20:
printf("\n Invalid audio parameters. %s %d\n",filestr,line);
break;
default:
printf("\nError code %d,\t%s\t%d\n\n", err, filestr, line);
}
switch (err) {
case 0:
printf("\n No error.\n");
break;
case -1:
printf("\n Unknown error: %s %d\n", filestr, line);
break;
case -2:
printf("\n Null pointer. Check filename/path + permissions? %s %d\n",
filestr, line);
break;
case -3:
printf("\n Unsupported feature/library load error. %s %d\n",
filestr, line);
break;
case -4:
printf("\n Could not allocate memory. %s %d\n", filestr, line);
break;
case -5:
printf("\n Insufficient IO buffers. %s %d\n", filestr, line);
break;
case -6:
printf("\n Invalid handle. %s %d\n", filestr, line);
break;
case -7:
printf("\n Memory lock failure. %s %d\n", filestr, line);
break;
case -8:
printf("\n Function called before initialization. %s %d\n",
filestr, line);
break;
case -9:
printf("\n Specified object not found. %s %d\n", filestr, line);
break;
case -10:
printf("\n More input data expected. %s %d\n", filestr, line);
break;
case -11:
printf("\n More output surfaces expected. %s %d\n", filestr,
line);
break;
case -12:
printf("\n Operation aborted. %s %d\n", filestr, line);
break;
case -13:
printf("\n HW device lost. %s %d\n", filestr, line);
break;
case -14:
printf("\n Incompatible video parameters. %s %d\n", filestr,
line);
break;
case -15:
printf("\n Invalid video parameters. %s %d\n", filestr, line);
break;
case -16:
printf("\n Undefined behavior. %s %d\n", filestr, line);
break;
case -17:
printf("\n Device operation failure. %s %d\n", filestr, line);
break;
case -18:
printf("\n More bitstream data expected. %s %d\n", filestr,
line);
break;
case -19:
printf("\n Incompatible audio parameters. %s %d\n", filestr,
line);
break;
case -20:
printf("\n Invalid audio parameters. %s %d\n", filestr, line);
break;
default:
printf("\nError code %d,\t%s\t%d\n\n", err, filestr, line);
}
}
mfxStatus ReadPlaneData(mfxU16 w, mfxU16 h, mfxU8* buf, mfxU8* ptr,
mfxU16 pitch, mfxU16 offset, FILE* fSource)
mfxStatus ReadPlaneData(mfxU16 w, mfxU16 h, mfxU8 *buf, mfxU8 *ptr,
mfxU16 pitch, mfxU16 offset, FILE *fSource)
{
mfxU32 nBytesRead;
for (mfxU16 i = 0; i < h; i++) {
nBytesRead = (mfxU32) fread(buf, 1, w, fSource);
if (w != nBytesRead)
return MFX_ERR_MORE_DATA;
for (mfxU16 j = 0; j < w; j++)
ptr[i * pitch + j * 2 + offset] = buf[j];
}
return MFX_ERR_NONE;
mfxU32 nBytesRead;
for (mfxU16 i = 0; i < h; i++) {
nBytesRead = (mfxU32)fread(buf, 1, w, fSource);
if (w != nBytesRead)
return MFX_ERR_MORE_DATA;
for (mfxU16 j = 0; j < w; j++)
ptr[i * pitch + j * 2 + offset] = buf[j];
}
return MFX_ERR_NONE;
}
mfxStatus LoadRawFrame(mfxFrameSurface1* pSurface, FILE* fSource)
mfxStatus LoadRawFrame(mfxFrameSurface1 *pSurface, FILE *fSource)
{
if (!fSource) {
// Simulate instantaneous access to 1000 "empty" frames.
static int frameCount = 0;
if (1000 == frameCount++)
return MFX_ERR_MORE_DATA;
else
return MFX_ERR_NONE;
}
if (!fSource) {
// Simulate instantaneous access to 1000 "empty" frames.
static int frameCount = 0;
if (1000 == frameCount++)
return MFX_ERR_MORE_DATA;
else
return MFX_ERR_NONE;
}
mfxStatus sts = MFX_ERR_NONE;
mfxU32 nBytesRead;
mfxU16 w, h, i, pitch;
mfxU8* ptr;
mfxFrameInfo* pInfo = &pSurface->Info;
mfxFrameData* pData = &pSurface->Data;
mfxStatus sts = MFX_ERR_NONE;
mfxU32 nBytesRead;
mfxU16 w, h, i, pitch;
mfxU8 *ptr;
mfxFrameInfo *pInfo = &pSurface->Info;
mfxFrameData *pData = &pSurface->Data;
if (pInfo->CropH > 0 && pInfo->CropW > 0) {
w = pInfo->CropW;
h = pInfo->CropH;
} else {
w = pInfo->Width;
h = pInfo->Height;
}
if (pInfo->CropH > 0 && pInfo->CropW > 0) {
w = pInfo->CropW;
h = pInfo->CropH;
} else {
w = pInfo->Width;
h = pInfo->Height;
}
pitch = pData->Pitch;
ptr = pData->Y + pInfo->CropX + pInfo->CropY * pData->Pitch;
pitch = pData->Pitch;
ptr = pData->Y + pInfo->CropX + pInfo->CropY * pData->Pitch;
// read luminance plane
for (i = 0; i < h; i++) {
nBytesRead = (mfxU32) fread(ptr + i * pitch, 1, w, fSource);
if (w != nBytesRead)
return MFX_ERR_MORE_DATA;
}
// read luminance plane
for (i = 0; i < h; i++) {
nBytesRead = (mfxU32)fread(ptr + i * pitch, 1, w, fSource);
if (w != nBytesRead)
return MFX_ERR_MORE_DATA;
}
mfxU8 buf[2048]; // maximum supported chroma width for nv12
w /= 2;
h /= 2;
ptr = pData->UV + pInfo->CropX + (pInfo->CropY / 2) * pitch;
if (w > 2048)
return MFX_ERR_UNSUPPORTED;
mfxU8 buf[2048]; // maximum supported chroma width for nv12
w /= 2;
h /= 2;
ptr = pData->UV + pInfo->CropX + (pInfo->CropY / 2) * pitch;
if (w > 2048)
return MFX_ERR_UNSUPPORTED;
// load U
sts = ReadPlaneData(w, h, buf, ptr, pitch, 0, fSource);
if (MFX_ERR_NONE != sts)
return sts;
// load V
sts = ReadPlaneData(w, h, buf, ptr, pitch, 1, fSource);
if (MFX_ERR_NONE != sts)
return sts;
// load U
sts = ReadPlaneData(w, h, buf, ptr, pitch, 0, fSource);
if (MFX_ERR_NONE != sts)
return sts;
// load V
sts = ReadPlaneData(w, h, buf, ptr, pitch, 1, fSource);
if (MFX_ERR_NONE != sts)
return sts;
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus LoadRawRGBFrame(mfxFrameSurface1* pSurface, FILE* fSource)
mfxStatus LoadRawRGBFrame(mfxFrameSurface1 *pSurface, FILE *fSource)
{
if (!fSource) {
// Simulate instantaneous access to 1000 "empty" frames.
static int frameCount = 0;
if (1000 == frameCount++)
return MFX_ERR_MORE_DATA;
else
return MFX_ERR_NONE;
}
if (!fSource) {
// Simulate instantaneous access to 1000 "empty" frames.
static int frameCount = 0;
if (1000 == frameCount++)
return MFX_ERR_MORE_DATA;
else
return MFX_ERR_NONE;
}
size_t nBytesRead;
mfxU16 w, h;
mfxFrameInfo* pInfo = &pSurface->Info;
size_t nBytesRead;
mfxU16 w, h;
mfxFrameInfo *pInfo = &pSurface->Info;
if (pInfo->CropH > 0 && pInfo->CropW > 0) {
w = pInfo->CropW;
h = pInfo->CropH;
} else {
w = pInfo->Width;
h = pInfo->Height;
}
if (pInfo->CropH > 0 && pInfo->CropW > 0) {
w = pInfo->CropW;
h = pInfo->CropH;
} else {
w = pInfo->Width;
h = pInfo->Height;
}
for (mfxU16 i = 0; i < h; i++) {
nBytesRead = fread(pSurface->Data.B + i * pSurface->Data.Pitch,
1, w * 4, fSource);
if ((size_t)(w * 4) != nBytesRead)
return MFX_ERR_MORE_DATA;
}
for (mfxU16 i = 0; i < h; i++) {
nBytesRead = fread(pSurface->Data.B + i * pSurface->Data.Pitch,
1, w * 4, fSource);
if ((size_t)(w * 4) != nBytesRead)
return MFX_ERR_MORE_DATA;
}
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus WriteBitStreamFrame(mfxBitstream* pMfxBitstream, FILE* fSink)
mfxStatus WriteBitStreamFrame(mfxBitstream *pMfxBitstream, FILE *fSink)
{
mfxU32 nBytesWritten =
(mfxU32) fwrite(pMfxBitstream->Data + pMfxBitstream->DataOffset, 1,
pMfxBitstream->DataLength, fSink);
if (nBytesWritten != pMfxBitstream->DataLength)
return MFX_ERR_UNDEFINED_BEHAVIOR;
mfxU32 nBytesWritten =
(mfxU32)fwrite(pMfxBitstream->Data + pMfxBitstream->DataOffset,
1, pMfxBitstream->DataLength, fSink);
if (nBytesWritten != pMfxBitstream->DataLength)
return MFX_ERR_UNDEFINED_BEHAVIOR;
pMfxBitstream->DataLength = 0;
pMfxBitstream->DataLength = 0;
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus ReadBitStreamData(mfxBitstream* pBS, FILE* fSource)
mfxStatus ReadBitStreamData(mfxBitstream *pBS, FILE *fSource)
{
memmove(pBS->Data, pBS->Data + pBS->DataOffset, pBS->DataLength);
pBS->DataOffset = 0;
memmove(pBS->Data, pBS->Data + pBS->DataOffset, pBS->DataLength);
pBS->DataOffset = 0;
mfxU32 nBytesRead = (mfxU32) fread(pBS->Data + pBS->DataLength, 1,
pBS->MaxLength - pBS->DataLength,
fSource);
mfxU32 nBytesRead = (mfxU32)fread(pBS->Data + pBS->DataLength, 1,
pBS->MaxLength - pBS->DataLength,
fSource);
if (0 == nBytesRead)
return MFX_ERR_MORE_DATA;
if (0 == nBytesRead)
return MFX_ERR_MORE_DATA;
pBS->DataLength += nBytesRead;
pBS->DataLength += nBytesRead;
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
mfxStatus WriteSection(mfxU8* plane, mfxU16 factor, mfxU16 chunksize,
mfxFrameInfo* pInfo, mfxFrameData* pData, mfxU32 i,
mfxU32 j, FILE* fSink)
mfxStatus WriteSection(mfxU8 *plane, mfxU16 factor, mfxU16 chunksize,
mfxFrameInfo *pInfo, mfxFrameData *pData, mfxU32 i,
mfxU32 j, FILE *fSink)
{
if (chunksize !=
fwrite(plane +
(pInfo->CropY * pData->Pitch / factor + pInfo->CropX) +
i * pData->Pitch + j, 1, chunksize, fSink))
return MFX_ERR_UNDEFINED_BEHAVIOR;
return MFX_ERR_NONE;
if (chunksize != fwrite(plane +
(pInfo->CropY * pData->Pitch / factor +
pInfo->CropX) +
i * pData->Pitch + j,
1, chunksize, fSink))
return MFX_ERR_UNDEFINED_BEHAVIOR;
return MFX_ERR_NONE;
}
mfxStatus WriteRawFrame(mfxFrameSurface1* pSurface, FILE* fSink)
mfxStatus WriteRawFrame(mfxFrameSurface1 *pSurface, FILE *fSink)
{
mfxFrameInfo* pInfo = &pSurface->Info;
mfxFrameData* pData = &pSurface->Data;
mfxU32 i, j, h, w;
mfxStatus sts = MFX_ERR_NONE;
mfxFrameInfo *pInfo = &pSurface->Info;
mfxFrameData *pData = &pSurface->Data;
mfxU32 i, j, h, w;
mfxStatus sts = MFX_ERR_NONE;
for (i = 0; i < pInfo->CropH; i++)
sts =
WriteSection(pData->Y, 1, pInfo->CropW, pInfo, pData, i, 0,
fSink);
for (i = 0; i < pInfo->CropH; i++)
sts = WriteSection(pData->Y, 1, pInfo->CropW, pInfo, pData, i,
0, fSink);
h = pInfo->CropH / 2;
w = pInfo->CropW;
for (i = 0; i < h; i++)
for (j = 0; j < w; j += 2)
sts =
WriteSection(pData->UV, 2, 1, pInfo, pData, i, j,
fSink);
for (i = 0; i < h; i++)
for (j = 1; j < w; j += 2)
sts =
WriteSection(pData->UV, 2, 1, pInfo, pData, i, j,
fSink);
h = pInfo->CropH / 2;
w = pInfo->CropW;
for (i = 0; i < h; i++)
for (j = 0; j < w; j += 2)
sts = WriteSection(pData->UV, 2, 1, pInfo, pData, i, j,
fSink);
for (i = 0; i < h; i++)
for (j = 1; j < w; j += 2)
sts = WriteSection(pData->UV, 2, 1, pInfo, pData, i, j,
fSink);
return sts;
return sts;
}
int GetFreeTaskIndex(Task* pTaskPool, mfxU16 nPoolSize)
int GetFreeTaskIndex(Task *pTaskPool, mfxU16 nPoolSize)
{
if (pTaskPool)
for (int i = 0; i < nPoolSize; i++)
if (!pTaskPool[i].syncp)
return i;
return MFX_ERR_NOT_FOUND;
if (pTaskPool)
for (int i = 0; i < nPoolSize; i++)
if (!pTaskPool[i].syncp)
return i;
return MFX_ERR_NOT_FOUND;
}
void ClearYUVSurfaceSysMem(mfxFrameSurface1* pSfc, mfxU16 width, mfxU16 height)
void ClearYUVSurfaceSysMem(mfxFrameSurface1 *pSfc, mfxU16 width, mfxU16 height)
{
// In case simulating direct access to frames we initialize the allocated surfaces with default pattern
memset(pSfc->Data.Y, 100, width * height); // Y plane
memset(pSfc->Data.U, 50, (width * height)/2); // UV plane
// In case simulating direct access to frames we initialize the allocated surfaces with default pattern
memset(pSfc->Data.Y, 100, width * height); // Y plane
memset(pSfc->Data.U, 50, (width * height) / 2); // UV plane
}
// Get free raw frame surface
int GetFreeSurfaceIndex(mfxFrameSurface1** pSurfacesPool, mfxU16 nPoolSize)
int GetFreeSurfaceIndex(mfxFrameSurface1 **pSurfacesPool, mfxU16 nPoolSize)
{
if (pSurfacesPool)
for (mfxU16 i = 0; i < nPoolSize; i++)
if (0 == pSurfacesPool[i]->Data.Locked)
return i;
return MFX_ERR_NOT_FOUND;
if (pSurfacesPool)
for (mfxU16 i = 0; i < nPoolSize; i++)
if (0 == pSurfacesPool[i]->Data.Locked)
return i;
return MFX_ERR_NOT_FOUND;
}
char mfxFrameTypeString(mfxU16 FrameType)
{
mfxU8 FrameTmp = FrameType & 0xF;
char FrameTypeOut;
switch (FrameTmp) {
case MFX_FRAMETYPE_I:
FrameTypeOut = 'I';
break;
case MFX_FRAMETYPE_P:
FrameTypeOut = 'P';
break;
case MFX_FRAMETYPE_B:
FrameTypeOut = 'B';
break;
default:
FrameTypeOut = '*';
}
return FrameTypeOut;
mfxU8 FrameTmp = FrameType & 0xF;
char FrameTypeOut;
switch (FrameTmp) {
case MFX_FRAMETYPE_I:
FrameTypeOut = 'I';
break;
case MFX_FRAMETYPE_P:
FrameTypeOut = 'P';
break;
case MFX_FRAMETYPE_B:
FrameTypeOut = 'B';
break;
default:
FrameTypeOut = '*';
}
return FrameTypeOut;
}
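Taken together, these helpers cover the file I/O and pool management used by Intel's encode samples. A hypothetical per-frame routine is sketched below (not part of this commit; it assumes the session, encoder, surface pool, and Task bitstream were initialized elsewhere, and that MFXVideoENCODE from mfxvideo++.h is in scope):

static mfxStatus encode_one_frame(MFXVideoSession &session,
				  MFXVideoENCODE &enc,
				  mfxFrameSurface1 **pSurfaces,
				  mfxU16 nPoolSize, Task &task, FILE *fSource,
				  FILE *fSink)
{
	int nSurfIdx = GetFreeSurfaceIndex(pSurfaces, nPoolSize);
	MSDK_CHECK_ERROR(MFX_ERR_NOT_FOUND, nSurfIdx, MFX_ERR_MEMORY_ALLOC);

	mfxStatus sts = LoadRawFrame(pSurfaces[nSurfIdx], fSource);
	MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

	sts = enc.EncodeFrameAsync(NULL, pSurfaces[nSurfIdx], &task.mfxBS,
				   &task.syncp);
	if (MFX_ERR_NONE == sts) { /* MFX_ERR_MORE_DATA just means buffering */
		sts = session.SyncOperation(task.syncp, 60000);
		MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
		sts = WriteBitStreamFrame(&task.mfxBS, fSink);
	}
	return sts;
}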


@@ -28,38 +28,79 @@ Copyright(c) 2005-2014 Intel Corporation. All Rights Reserved.
// =================================================================
// Helper macro definitions...
#define MSDK_PRINT_RET_MSG(ERR) {PrintErrString(ERR, __FILE__, __LINE__);}
#define MSDK_CHECK_RESULT(P, X, ERR) {if ((X) > (P)) {MSDK_PRINT_RET_MSG(ERR); return ERR;}}
#define MSDK_CHECK_POINTER(P, ERR) {if (!(P)) {MSDK_PRINT_RET_MSG(ERR); return ERR;}}
#define MSDK_CHECK_ERROR(P, X, ERR) {if ((X) == (P)) {MSDK_PRINT_RET_MSG(ERR); return ERR;}}
#define MSDK_IGNORE_MFX_STS(P, X) {if ((X) == (P)) {P = MFX_ERR_NONE;}}
#define MSDK_BREAK_ON_ERROR(P) {if (MFX_ERR_NONE != (P)) break;}
#define MSDK_SAFE_DELETE_ARRAY(P) {if (P) {delete[] P; P = NULL;}}
#define MSDK_ALIGN32(X) (((mfxU32)((X)+31)) & (~ (mfxU32)31))
#define MSDK_ALIGN16(value) (((value + 15) >> 4) << 4)
#define MSDK_SAFE_RELEASE(X) {if (X) { X->Release(); X = NULL; }}
#define MSDK_MAX(A, B) (((A) > (B)) ? (A) : (B))
#define MSDK_PRINT_RET_MSG(ERR) \
{ \
PrintErrString(ERR, __FILE__, __LINE__); \
}
#define MSDK_CHECK_RESULT(P, X, ERR) \
{ \
if ((X) > (P)) { \
MSDK_PRINT_RET_MSG(ERR); \
return ERR; \
} \
}
#define MSDK_CHECK_POINTER(P, ERR) \
{ \
if (!(P)) { \
MSDK_PRINT_RET_MSG(ERR); \
return ERR; \
} \
}
#define MSDK_CHECK_ERROR(P, X, ERR) \
{ \
if ((X) == (P)) { \
MSDK_PRINT_RET_MSG(ERR); \
return ERR; \
} \
}
#define MSDK_IGNORE_MFX_STS(P, X) \
{ \
if ((X) == (P)) { \
P = MFX_ERR_NONE; \
} \
}
#define MSDK_BREAK_ON_ERROR(P) \
{ \
if (MFX_ERR_NONE != (P)) \
break; \
}
#define MSDK_SAFE_DELETE_ARRAY(P) \
{ \
if (P) { \
delete[] P; \
P = NULL; \
} \
}
#define MSDK_ALIGN32(X) (((mfxU32)((X) + 31)) & (~(mfxU32)31))
#define MSDK_ALIGN16(value) (((value + 15) >> 4) << 4)
#define MSDK_SAFE_RELEASE(X) \
{ \
if (X) { \
X->Release(); \
X = NULL; \
} \
}
#define MSDK_MAX(A, B) (((A) > (B)) ? (A) : (B))
// Usage of the following two macros are only required for certain Windows DirectX11 use cases
#define WILL_READ 0x1000
#define WILL_READ 0x1000
#define WILL_WRITE 0x2000
// =================================================================
// Intel Media SDK memory allocator entrypoints....
// Implementation of this functions is OS/Memory type specific.
mfxStatus simple_alloc(mfxHDL pthis, mfxFrameAllocRequest* request, mfxFrameAllocResponse* response);
mfxStatus simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr);
mfxStatus simple_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData* ptr);
mfxStatus simple_gethdl(mfxHDL pthis, mfxMemId mid, mfxHDL* handle);
mfxStatus simple_free(mfxHDL pthis, mfxFrameAllocResponse* response);
mfxStatus simple_alloc(mfxHDL pthis, mfxFrameAllocRequest *request,
mfxFrameAllocResponse *response);
mfxStatus simple_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr);
mfxStatus simple_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr);
mfxStatus simple_gethdl(mfxHDL pthis, mfxMemId mid, mfxHDL *handle);
mfxStatus simple_free(mfxHDL pthis, mfxFrameAllocResponse *response);
// =================================================================
// Utility functions, not directly tied to Media SDK functionality
//
void PrintErrString(int err,const char* filestr,int line);
void PrintErrString(int err, const char *filestr, int line);
// LoadRawFrame: Reads raw frame from YUV file (YV12) into NV12 surface
// - YV12 is a more common format for YUV files than NV12 (therefore the conversion during read and write)
@@ -67,35 +108,38 @@ void PrintErrString(int err,const char* filestr,int line);
// LoadRawRGBFrame: Reads raw RGB32 frames from file into RGB32 surface
// - For the simulation case (fSource = NULL), the surface is filled with default image data
mfxStatus LoadRawFrame(mfxFrameSurface1* pSurface, FILE* fSource);
mfxStatus LoadRawRGBFrame(mfxFrameSurface1* pSurface, FILE* fSource);
mfxStatus LoadRawFrame(mfxFrameSurface1 *pSurface, FILE *fSource);
mfxStatus LoadRawRGBFrame(mfxFrameSurface1 *pSurface, FILE *fSource);
// Write raw YUV (NV12) surface to YUV (YV12) file
mfxStatus WriteRawFrame(mfxFrameSurface1* pSurface, FILE* fSink);
mfxStatus WriteRawFrame(mfxFrameSurface1 *pSurface, FILE *fSink);
// Write bit stream data for frame to file
mfxStatus WriteBitStreamFrame(mfxBitstream* pMfxBitstream, FILE* fSink);
mfxStatus WriteBitStreamFrame(mfxBitstream *pMfxBitstream, FILE *fSink);
// Read bit stream data from file. Stream is read as large chunks (= many frames)
mfxStatus ReadBitStreamData(mfxBitstream* pBS, FILE* fSource);
mfxStatus ReadBitStreamData(mfxBitstream *pBS, FILE *fSource);
void ClearYUVSurfaceSysMem(mfxFrameSurface1* pSfc, mfxU16 width, mfxU16 height);
void ClearYUVSurfaceSysMem(mfxFrameSurface1 *pSfc, mfxU16 width, mfxU16 height);
void ClearYUVSurfaceVMem(mfxMemId memId);
void ClearRGBSurfaceVMem(mfxMemId memId);
// Get free raw frame surface
int GetFreeSurfaceIndex(mfxFrameSurface1** pSurfacesPool, mfxU16 nPoolSize);
int GetFreeSurfaceIndex(mfxFrameSurface1 **pSurfacesPool, mfxU16 nPoolSize);
// For use with asynchronous task management
typedef struct {
mfxBitstream mfxBS;
mfxSyncPoint syncp;
mfxBitstream mfxBS;
mfxSyncPoint syncp;
} Task;
// Get free task
int GetFreeTaskIndex(Task* pTaskPool, mfxU16 nPoolSize);
int GetFreeTaskIndex(Task *pTaskPool, mfxU16 nPoolSize);
// Initialize Intel Media SDK Session, device/display and memory manager
mfxStatus Initialize(mfxIMPL impl, mfxVersion ver, MFXVideoSession* pSession, mfxFrameAllocator* pmfxAllocator, mfxHDL *deviceHandle = NULL, bool bCreateSharedHandles = false, bool dx9hack = false);
mfxStatus Initialize(mfxIMPL impl, mfxVersion ver, MFXVideoSession *pSession,
mfxFrameAllocator *pmfxAllocator,
mfxHDL *deviceHandle = NULL,
bool bCreateSharedHandles = false, bool dx9hack = false);
// Release resources (device/display)
void Release();
@@ -103,7 +147,7 @@ void Release();
// Convert frame type to string
char mfxFrameTypeString(mfxU16 FrameType);
void mfxGetTime(mfxTime* timestamp);
void mfxGetTime(mfxTime *timestamp);
//void mfxInitTime(); might need this for Windows
double TimeDiffMsec(mfxTime tfinish, mfxTime tstart);


@@ -23,98 +23,109 @@ Copyright(c) 2005-2014 Intel Corporation. All Rights Reserved.
* Windows implementation of OS-specific utility functions
*/
mfxStatus Initialize(mfxIMPL impl, mfxVersion ver, MFXVideoSession* pSession, mfxFrameAllocator* pmfxAllocator, mfxHDL *deviceHandle, bool bCreateSharedHandles, bool dx9hack)
mfxStatus Initialize(mfxIMPL impl, mfxVersion ver, MFXVideoSession *pSession,
mfxFrameAllocator *pmfxAllocator, mfxHDL *deviceHandle,
bool bCreateSharedHandles, bool dx9hack)
{
bCreateSharedHandles; // (Hugh) Currently unused
pmfxAllocator; // (Hugh) Currently unused
bCreateSharedHandles; // (Hugh) Currently unused
pmfxAllocator; // (Hugh) Currently unused
mfxStatus sts = MFX_ERR_NONE;
mfxStatus sts = MFX_ERR_NONE;
// If mfxFrameAllocator is provided it means we need to setup DirectX device and memory allocator
if (pmfxAllocator && !dx9hack) {
// Initialize Intel Media SDK Session
sts = pSession->Init(impl, &ver);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
// If mfxFrameAllocator is provided it means we need to setup DirectX device and memory allocator
if (pmfxAllocator && !dx9hack) {
// Initialize Intel Media SDK Session
sts = pSession->Init(impl, &ver);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
// Create DirectX device context
if (deviceHandle == NULL || *deviceHandle == NULL) {
sts = CreateHWDevice(*pSession, deviceHandle, NULL, bCreateSharedHandles);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
}
// Create DirectX device context
if (deviceHandle == NULL || *deviceHandle == NULL) {
sts = CreateHWDevice(*pSession, deviceHandle, NULL,
bCreateSharedHandles);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
}
if (deviceHandle == NULL || *deviceHandle == NULL) return MFX_ERR_DEVICE_FAILED;
if (deviceHandle == NULL || *deviceHandle == NULL)
return MFX_ERR_DEVICE_FAILED;
// Provide device manager to Media SDK
sts = pSession->SetHandle(DEVICE_MGR_TYPE, *deviceHandle);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
// Provide device manager to Media SDK
sts = pSession->SetHandle(DEVICE_MGR_TYPE, *deviceHandle);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
pmfxAllocator->pthis = *pSession; // We use Media SDK session ID as the allocation identifier
pmfxAllocator->Alloc = simple_alloc;
pmfxAllocator->Free = simple_free;
pmfxAllocator->Lock = simple_lock;
pmfxAllocator->Unlock = simple_unlock;
pmfxAllocator->GetHDL = simple_gethdl;
pmfxAllocator->pthis =
*pSession; // We use Media SDK session ID as the allocation identifier
pmfxAllocator->Alloc = simple_alloc;
pmfxAllocator->Free = simple_free;
pmfxAllocator->Lock = simple_lock;
pmfxAllocator->Unlock = simple_unlock;
pmfxAllocator->GetHDL = simple_gethdl;
// Since we are using video memory we must provide Media SDK with an external allocator
sts = pSession->SetFrameAllocator(pmfxAllocator);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
// Since we are using video memory we must provide Media SDK with an external allocator
sts = pSession->SetFrameAllocator(pmfxAllocator);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
} else if (pmfxAllocator && dx9hack) {
// Initialize Intel Media SDK Session
sts = pSession->Init(impl, &ver);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
} else if (pmfxAllocator && dx9hack) {
// Initialize Intel Media SDK Session
sts = pSession->Init(impl, &ver);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
// Create DirectX device context
if (deviceHandle == NULL || *deviceHandle == NULL ) {
sts = DX9_CreateHWDevice(*pSession, deviceHandle, NULL, false);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
}
if (*deviceHandle == NULL) return MFX_ERR_DEVICE_FAILED;
// Create DirectX device context
if (deviceHandle == NULL || *deviceHandle == NULL) {
sts = DX9_CreateHWDevice(*pSession, deviceHandle, NULL,
false);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
}
if (*deviceHandle == NULL)
return MFX_ERR_DEVICE_FAILED;
// Provide device manager to Media SDK
sts = pSession->SetHandle(MFX_HANDLE_D3D9_DEVICE_MANAGER, *deviceHandle);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
// Provide device manager to Media SDK
sts = pSession->SetHandle(MFX_HANDLE_D3D9_DEVICE_MANAGER,
*deviceHandle);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
pmfxAllocator->pthis = *pSession; // We use Media SDK session ID as the allocation identifier
pmfxAllocator->Alloc = dx9_simple_alloc;
pmfxAllocator->Free = dx9_simple_free;
pmfxAllocator->Lock = dx9_simple_lock;
pmfxAllocator->Unlock = dx9_simple_unlock;
pmfxAllocator->GetHDL = dx9_simple_gethdl;
pmfxAllocator->pthis =
*pSession; // We use Media SDK session ID as the allocation identifier
pmfxAllocator->Alloc = dx9_simple_alloc;
pmfxAllocator->Free = dx9_simple_free;
pmfxAllocator->Lock = dx9_simple_lock;
pmfxAllocator->Unlock = dx9_simple_unlock;
pmfxAllocator->GetHDL = dx9_simple_gethdl;
// Since we are using video memory we must provide Media SDK with an external allocator
sts = pSession->SetFrameAllocator(pmfxAllocator);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
// Since we are using video memory we must provide Media SDK with an external allocator
sts = pSession->SetFrameAllocator(pmfxAllocator);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
} else {
// Initialize Intel Media SDK Session
sts = pSession->Init(impl, &ver);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
}
return sts;
} else {
// Initialize Intel Media SDK Session
sts = pSession->Init(impl, &ver);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
}
return sts;
}
void Release()
{
#if defined(DX9_D3D) || defined(DX11_D3D)
CleanupHWDevice();
DX9_CleanupHWDevice();
CleanupHWDevice();
DX9_CleanupHWDevice();
#endif
}
void mfxGetTime(mfxTime* timestamp)
void mfxGetTime(mfxTime *timestamp)
{
QueryPerformanceCounter(timestamp);
QueryPerformanceCounter(timestamp);
}
double TimeDiffMsec(mfxTime tfinish, mfxTime tstart)
{
static LARGE_INTEGER tFreq = { 0 };
static LARGE_INTEGER tFreq = {0};
if (!tFreq.QuadPart) QueryPerformanceFrequency(&tFreq);
if (!tFreq.QuadPart)
QueryPerformanceFrequency(&tFreq);
double freq = (double)tFreq.QuadPart;
return 1000.0 * ((double)tfinish.QuadPart - (double)tstart.QuadPart) / freq;
double freq = (double)tFreq.QuadPart;
return 1000.0 * ((double)tfinish.QuadPart - (double)tstart.QuadPart) /
freq;
}
/* (Hugh) Functions currently unused */
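For orientation only (not from the commit), a sketch of how an application might combine Initialize() with the timing helpers in this file; the implementation flags and version below are illustrative:

static mfxStatus open_session_and_time_it(MFXVideoSession &session,
					  mfxFrameAllocator &allocator)
{
	/* allocator is assumed zero-initialized by the caller and must
	 * outlive the session, since SetFrameAllocator() keeps using it */
	mfxVersion ver = {{0, 1}};
	mfxHDL deviceHandle = NULL;
	mfxTime tStart, tEnd;

	mfxGetTime(&tStart);
	mfxStatus sts = Initialize(MFX_IMPL_HARDWARE_ANY | MFX_IMPL_VIA_D3D11,
				   ver, &session, &allocator, &deviceHandle);
	mfxGetTime(&tEnd);
	printf("Session init took %.2f msec\n", TimeDiffMsec(tEnd, tStart));
	return sts;
}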


@@ -14,7 +14,7 @@ Copyright(c) 2011-2015 Intel Corporation. All Rights Reserved.
//prefast signature used in combaseapi.h
#ifndef _PREFAST_
#pragma warning(disable:4068)
#pragma warning(disable : 4068)
#endif
#include "device_directx9.h"
@@ -23,356 +23,343 @@ Copyright(c) 2011-2015 Intel Corporation. All Rights Reserved.
#include "atlbase.h"
// Macros
#define MSDK_ZERO_MEMORY(VAR) {memset(&VAR, 0, sizeof(VAR));}
#define MSDK_MEMCPY_VAR(dstVarName, src, count) memcpy_s(&(dstVarName), sizeof(dstVarName), (src), (count))
#define MSDK_ZERO_MEMORY(VAR) \
{ \
memset(&VAR, 0, sizeof(VAR)); \
}
#define MSDK_MEMCPY_VAR(dstVarName, src, count) \
memcpy_s(&(dstVarName), sizeof(dstVarName), (src), (count))
CD3D9Device::CD3D9Device()
{
m_pD3D9 = NULL;
m_pD3DD9 = NULL;
m_pDeviceManager9 = NULL;
MSDK_ZERO_MEMORY(m_D3DPP);
m_resetToken = 0;
m_pD3D9 = NULL;
m_pD3DD9 = NULL;
m_pDeviceManager9 = NULL;
MSDK_ZERO_MEMORY(m_D3DPP);
m_resetToken = 0;
m_nViews = 0;
m_pS3DControl = NULL;
m_nViews = 0;
m_pS3DControl = NULL;
MSDK_ZERO_MEMORY(m_backBufferDesc);
m_pDXVAVPS = NULL;
m_pDXVAVP_Left = NULL;
m_pDXVAVP_Right = NULL;
MSDK_ZERO_MEMORY(m_backBufferDesc);
m_pDXVAVPS = NULL;
m_pDXVAVP_Left = NULL;
m_pDXVAVP_Right = NULL;
MSDK_ZERO_MEMORY(m_targetRect);
MSDK_ZERO_MEMORY(m_targetRect);
MSDK_ZERO_MEMORY(m_VideoDesc);
MSDK_ZERO_MEMORY(m_BltParams);
MSDK_ZERO_MEMORY(m_Sample);
MSDK_ZERO_MEMORY(m_VideoDesc);
MSDK_ZERO_MEMORY(m_BltParams);
MSDK_ZERO_MEMORY(m_Sample);
// Initialize DXVA structures
// Initialize DXVA structures
DXVA2_AYUVSample16 color = {
0x8000, // Cr
0x8000, // Cb
0x1000, // Y
0xffff // Alpha
};
DXVA2_AYUVSample16 color = {
0x8000, // Cr
0x8000, // Cb
0x1000, // Y
0xffff // Alpha
};
DXVA2_ExtendedFormat format = { // DestFormat
DXVA2_SampleProgressiveFrame, // SampleFormat
DXVA2_VideoChromaSubsampling_MPEG2, // VideoChromaSubsampling
DXVA_NominalRange_0_255, // NominalRange
DXVA2_VideoTransferMatrix_BT709, // VideoTransferMatrix
DXVA2_VideoLighting_bright, // VideoLighting
DXVA2_VideoPrimaries_BT709, // VideoPrimaries
DXVA2_VideoTransFunc_709 // VideoTransferFunction
};
DXVA2_ExtendedFormat format = {
// DestFormat
DXVA2_SampleProgressiveFrame, // SampleFormat
DXVA2_VideoChromaSubsampling_MPEG2, // VideoChromaSubsampling
DXVA_NominalRange_0_255, // NominalRange
DXVA2_VideoTransferMatrix_BT709, // VideoTransferMatrix
DXVA2_VideoLighting_bright, // VideoLighting
DXVA2_VideoPrimaries_BT709, // VideoPrimaries
DXVA2_VideoTransFunc_709 // VideoTransferFunction
};
// init m_VideoDesc structure
MSDK_MEMCPY_VAR(m_VideoDesc.SampleFormat, &format, sizeof(DXVA2_ExtendedFormat));
m_VideoDesc.SampleWidth = 0;
m_VideoDesc.SampleHeight = 0;
m_VideoDesc.InputSampleFreq.Numerator = 60;
m_VideoDesc.InputSampleFreq.Denominator = 1;
m_VideoDesc.OutputFrameFreq.Numerator = 60;
m_VideoDesc.OutputFrameFreq.Denominator = 1;
// init m_VideoDesc structure
MSDK_MEMCPY_VAR(m_VideoDesc.SampleFormat, &format,
sizeof(DXVA2_ExtendedFormat));
m_VideoDesc.SampleWidth = 0;
m_VideoDesc.SampleHeight = 0;
m_VideoDesc.InputSampleFreq.Numerator = 60;
m_VideoDesc.InputSampleFreq.Denominator = 1;
m_VideoDesc.OutputFrameFreq.Numerator = 60;
m_VideoDesc.OutputFrameFreq.Denominator = 1;
// init m_BltParams structure
MSDK_MEMCPY_VAR(m_BltParams.DestFormat, &format, sizeof(DXVA2_ExtendedFormat));
MSDK_MEMCPY_VAR(m_BltParams.BackgroundColor, &color, sizeof(DXVA2_AYUVSample16));
// init m_BltParams structure
MSDK_MEMCPY_VAR(m_BltParams.DestFormat, &format,
sizeof(DXVA2_ExtendedFormat));
MSDK_MEMCPY_VAR(m_BltParams.BackgroundColor, &color,
sizeof(DXVA2_AYUVSample16));
// init m_Sample structure
m_Sample.Start = 0;
m_Sample.End = 1;
m_Sample.SampleFormat = format;
m_Sample.PlanarAlpha.Fraction = 0;
m_Sample.PlanarAlpha.Value = 1;
// init m_Sample structure
m_Sample.Start = 0;
m_Sample.End = 1;
m_Sample.SampleFormat = format;
m_Sample.PlanarAlpha.Fraction = 0;
m_Sample.PlanarAlpha.Value = 1;
m_bIsA2rgb10 = FALSE;
m_bIsA2rgb10 = FALSE;
}
bool CD3D9Device::CheckOverlaySupport()
{
D3DCAPS9 d3d9caps;
D3DOVERLAYCAPS d3doverlaycaps = {0};
IDirect3D9ExOverlayExtension *d3d9overlay = NULL;
bool overlaySupported = false;
D3DCAPS9 d3d9caps;
D3DOVERLAYCAPS d3doverlaycaps = {0};
IDirect3D9ExOverlayExtension *d3d9overlay = NULL;
bool overlaySupported = false;
memset(&d3d9caps, 0, sizeof(d3d9caps));
HRESULT hr = m_pD3D9->GetDeviceCaps(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, &d3d9caps);
if (FAILED(hr) || !(d3d9caps.Caps & D3DCAPS_OVERLAY))
{
overlaySupported = false;
}
else
{
hr = m_pD3D9->QueryInterface(IID_PPV_ARGS(&d3d9overlay));
if (FAILED(hr) || (d3d9overlay == NULL))
{
overlaySupported = false;
}
else
{
hr = d3d9overlay->CheckDeviceOverlayType(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
m_D3DPP.BackBufferWidth,
m_D3DPP.BackBufferHeight,
m_D3DPP.BackBufferFormat, NULL,
D3DDISPLAYROTATION_IDENTITY, &d3doverlaycaps);
MSDK_SAFE_RELEASE(d3d9overlay);
memset(&d3d9caps, 0, sizeof(d3d9caps));
HRESULT hr = m_pD3D9->GetDeviceCaps(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
&d3d9caps);
if (FAILED(hr) || !(d3d9caps.Caps & D3DCAPS_OVERLAY)) {
overlaySupported = false;
} else {
hr = m_pD3D9->QueryInterface(IID_PPV_ARGS(&d3d9overlay));
if (FAILED(hr) || (d3d9overlay == NULL)) {
overlaySupported = false;
} else {
hr = d3d9overlay->CheckDeviceOverlayType(
D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
m_D3DPP.BackBufferWidth,
m_D3DPP.BackBufferHeight,
m_D3DPP.BackBufferFormat, NULL,
D3DDISPLAYROTATION_IDENTITY, &d3doverlaycaps);
MSDK_SAFE_RELEASE(d3d9overlay);
if (FAILED(hr))
{
overlaySupported = false;
}
else
{
overlaySupported = true;
}
}
}
if (FAILED(hr)) {
overlaySupported = false;
} else {
overlaySupported = true;
}
}
}
return overlaySupported;
return overlaySupported;
}
mfxStatus CD3D9Device::FillD3DPP(mfxHDL hWindow, mfxU16 nViews, D3DPRESENT_PARAMETERS &D3DPP)
mfxStatus CD3D9Device::FillD3DPP(mfxHDL hWindow, mfxU16 nViews,
D3DPRESENT_PARAMETERS &D3DPP)
{
mfxStatus sts = MFX_ERR_NONE;
mfxStatus sts = MFX_ERR_NONE;
D3DPP.Windowed = true;
D3DPP.hDeviceWindow = (HWND)hWindow;
D3DPP.Windowed = true;
D3DPP.hDeviceWindow = (HWND)hWindow;
D3DPP.Flags = D3DPRESENTFLAG_VIDEO;
D3DPP.FullScreen_RefreshRateInHz = D3DPRESENT_RATE_DEFAULT;
D3DPP.PresentationInterval = D3DPRESENT_INTERVAL_ONE; // note that this setting leads to an implicit timeBeginPeriod call
D3DPP.BackBufferCount = 1;
D3DPP.BackBufferFormat = (m_bIsA2rgb10) ? D3DFMT_A2R10G10B10 : D3DFMT_X8R8G8B8;
D3DPP.Flags = D3DPRESENTFLAG_VIDEO;
D3DPP.FullScreen_RefreshRateInHz = D3DPRESENT_RATE_DEFAULT;
D3DPP.PresentationInterval =
D3DPRESENT_INTERVAL_ONE; // note that this setting leads to an implicit timeBeginPeriod call
D3DPP.BackBufferCount = 1;
D3DPP.BackBufferFormat = (m_bIsA2rgb10) ? D3DFMT_A2R10G10B10
: D3DFMT_X8R8G8B8;
if (hWindow)
{
RECT r;
GetClientRect((HWND)hWindow, &r);
int x = GetSystemMetrics(SM_CXSCREEN);
int y = GetSystemMetrics(SM_CYSCREEN);
D3DPP.BackBufferWidth = min(r.right - r.left, x);
D3DPP.BackBufferHeight = min(r.bottom - r.top, y);
}
else
{
D3DPP.BackBufferWidth = GetSystemMetrics(SM_CYSCREEN);
D3DPP.BackBufferHeight = GetSystemMetrics(SM_CYSCREEN);
}
//
// Mark the back buffer lockable if software DXVA2 could be used.
// This is because software DXVA2 device requires a lockable render target
// for the optimal performance.
//
{
D3DPP.Flags |= D3DPRESENTFLAG_LOCKABLE_BACKBUFFER;
}
if (hWindow) {
RECT r;
GetClientRect((HWND)hWindow, &r);
int x = GetSystemMetrics(SM_CXSCREEN);
int y = GetSystemMetrics(SM_CYSCREEN);
D3DPP.BackBufferWidth = min(r.right - r.left, x);
D3DPP.BackBufferHeight = min(r.bottom - r.top, y);
} else {
D3DPP.BackBufferWidth = GetSystemMetrics(SM_CYSCREEN);
D3DPP.BackBufferHeight = GetSystemMetrics(SM_CYSCREEN);
}
//
// Mark the back buffer lockable if software DXVA2 could be used.
// This is because software DXVA2 device requires a lockable render target
// for the optimal performance.
//
{
D3DPP.Flags |= D3DPRESENTFLAG_LOCKABLE_BACKBUFFER;
}
bool isOverlaySupported = CheckOverlaySupport();
if (2 == nViews && !isOverlaySupported)
return MFX_ERR_UNSUPPORTED;
bool isOverlaySupported = CheckOverlaySupport();
if (2 == nViews && !isOverlaySupported)
return MFX_ERR_UNSUPPORTED;
bool needOverlay = (2 == nViews) ? true : false;
bool needOverlay = (2 == nViews) ? true : false;
D3DPP.SwapEffect = needOverlay ? D3DSWAPEFFECT_OVERLAY : D3DSWAPEFFECT_DISCARD;
D3DPP.SwapEffect = needOverlay ? D3DSWAPEFFECT_OVERLAY
: D3DSWAPEFFECT_DISCARD;
return sts;
return sts;
}
mfxStatus CD3D9Device::Init(
mfxHDL hWindow,
mfxU16 nViews,
mfxU32 nAdapterNum)
mfxStatus CD3D9Device::Init(mfxHDL hWindow, mfxU16 nViews, mfxU32 nAdapterNum)
{
mfxStatus sts = MFX_ERR_NONE;
mfxStatus sts = MFX_ERR_NONE;
if (2 < nViews)
return MFX_ERR_UNSUPPORTED;
if (2 < nViews)
return MFX_ERR_UNSUPPORTED;
m_nViews = nViews;
m_nViews = nViews;
HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, &m_pD3D9);
if (!m_pD3D9 || FAILED(hr))
return MFX_ERR_DEVICE_FAILED;
HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, &m_pD3D9);
if (!m_pD3D9 || FAILED(hr))
return MFX_ERR_DEVICE_FAILED;
ZeroMemory(&m_D3DPP, sizeof(m_D3DPP));
sts = FillD3DPP(hWindow, nViews, m_D3DPP);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
ZeroMemory(&m_D3DPP, sizeof(m_D3DPP));
sts = FillD3DPP(hWindow, nViews, m_D3DPP);
MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
hr = m_pD3D9->CreateDeviceEx(
nAdapterNum,
D3DDEVTYPE_HAL,
(HWND)hWindow,
D3DCREATE_SOFTWARE_VERTEXPROCESSING | D3DCREATE_MULTITHREADED | D3DCREATE_FPU_PRESERVE,
&m_D3DPP,
NULL,
&m_pD3DD9);
if (FAILED(hr))
return MFX_ERR_NULL_PTR;
hr = m_pD3D9->CreateDeviceEx(nAdapterNum, D3DDEVTYPE_HAL, (HWND)hWindow,
D3DCREATE_SOFTWARE_VERTEXPROCESSING |
D3DCREATE_MULTITHREADED |
D3DCREATE_FPU_PRESERVE,
&m_D3DPP, NULL, &m_pD3DD9);
if (FAILED(hr))
return MFX_ERR_NULL_PTR;
if(hWindow)
{
hr = m_pD3DD9->ResetEx(&m_D3DPP, NULL);
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
hr = m_pD3DD9->Clear(0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
}
UINT resetToken = 0;
if (hWindow) {
hr = m_pD3DD9->ResetEx(&m_D3DPP, NULL);
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
hr = m_pD3DD9->Clear(0, NULL, D3DCLEAR_TARGET,
D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
}
UINT resetToken = 0;
hr = DXVA2CreateDirect3DDeviceManager9(&resetToken, &m_pDeviceManager9);
if (FAILED(hr))
return MFX_ERR_NULL_PTR;
hr = DXVA2CreateDirect3DDeviceManager9(&resetToken, &m_pDeviceManager9);
if (FAILED(hr))
return MFX_ERR_NULL_PTR;
hr = m_pDeviceManager9->ResetDevice(m_pD3DD9, resetToken);
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
hr = m_pDeviceManager9->ResetDevice(m_pD3DD9, resetToken);
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
m_resetToken = resetToken;
m_resetToken = resetToken;
return sts;
return sts;
}
mfxStatus CD3D9Device::Reset()
{
HRESULT hr = NO_ERROR;
MSDK_CHECK_POINTER(m_pD3DD9, MFX_ERR_NULL_PTR);
HRESULT hr = NO_ERROR;
MSDK_CHECK_POINTER(m_pD3DD9, MFX_ERR_NULL_PTR);
if (m_D3DPP.Windowed)
{
RECT r;
GetClientRect((HWND)m_D3DPP.hDeviceWindow, &r);
int x = GetSystemMetrics(SM_CXSCREEN);
int y = GetSystemMetrics(SM_CYSCREEN);
m_D3DPP.BackBufferWidth = min(r.right - r.left, x);
m_D3DPP.BackBufferHeight = min(r.bottom - r.top, y);
}
else
{
m_D3DPP.BackBufferWidth = GetSystemMetrics(SM_CXSCREEN);
m_D3DPP.BackBufferHeight = GetSystemMetrics(SM_CYSCREEN);
}
if (m_D3DPP.Windowed) {
RECT r;
GetClientRect((HWND)m_D3DPP.hDeviceWindow, &r);
int x = GetSystemMetrics(SM_CXSCREEN);
int y = GetSystemMetrics(SM_CYSCREEN);
m_D3DPP.BackBufferWidth = min(r.right - r.left, x);
m_D3DPP.BackBufferHeight = min(r.bottom - r.top, y);
} else {
m_D3DPP.BackBufferWidth = GetSystemMetrics(SM_CXSCREEN);
m_D3DPP.BackBufferHeight = GetSystemMetrics(SM_CYSCREEN);
}
// Reset will change the parameters, so use a copy instead.
D3DPRESENT_PARAMETERS d3dpp = m_D3DPP;
hr = m_pD3DD9->ResetEx(&d3dpp, NULL);
// Reset will change the parameters, so use a copy instead.
D3DPRESENT_PARAMETERS d3dpp = m_D3DPP;
hr = m_pD3DD9->ResetEx(&d3dpp, NULL);
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
hr = m_pDeviceManager9->ResetDevice(m_pD3DD9, m_resetToken);
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
hr = m_pDeviceManager9->ResetDevice(m_pD3DD9, m_resetToken);
if (FAILED(hr))
return MFX_ERR_UNDEFINED_BEHAVIOR;
return MFX_ERR_NONE;
return MFX_ERR_NONE;
}
void CD3D9Device::Close()
{
MSDK_SAFE_RELEASE(m_pDXVAVP_Left);
MSDK_SAFE_RELEASE(m_pDXVAVP_Right);
MSDK_SAFE_RELEASE(m_pDXVAVPS);
MSDK_SAFE_RELEASE(m_pDXVAVP_Left);
MSDK_SAFE_RELEASE(m_pDXVAVP_Right);
MSDK_SAFE_RELEASE(m_pDXVAVPS);
MSDK_SAFE_RELEASE(m_pDeviceManager9);
MSDK_SAFE_RELEASE(m_pD3DD9);
MSDK_SAFE_RELEASE(m_pD3D9);
m_pS3DControl = NULL;
MSDK_SAFE_RELEASE(m_pDeviceManager9);
MSDK_SAFE_RELEASE(m_pD3DD9);
MSDK_SAFE_RELEASE(m_pD3D9);
m_pS3DControl = NULL;
}
CD3D9Device::~CD3D9Device()
{
Close();
Close();
}
mfxStatus CD3D9Device::GetHandle(mfxHandleType type, mfxHDL *pHdl)
{
if (MFX_HANDLE_DIRECT3D_DEVICE_MANAGER9 == type && pHdl != NULL)
{
*pHdl = m_pDeviceManager9;
if (MFX_HANDLE_DIRECT3D_DEVICE_MANAGER9 == type && pHdl != NULL) {
*pHdl = m_pDeviceManager9;
return MFX_ERR_NONE;
}
else if (MFX_HANDLE_GFXS3DCONTROL == type && pHdl != NULL)
{
*pHdl = m_pS3DControl;
return MFX_ERR_NONE;
} else if (MFX_HANDLE_GFXS3DCONTROL == type && pHdl != NULL) {
*pHdl = m_pS3DControl;
return MFX_ERR_NONE;
}
return MFX_ERR_UNSUPPORTED;
return MFX_ERR_NONE;
}
return MFX_ERR_UNSUPPORTED;
}
mfxStatus CD3D9Device::SetHandle(mfxHandleType type, mfxHDL hdl)
{
if (MFX_HANDLE_GFXS3DCONTROL == type && hdl != NULL)
{
m_pS3DControl = (IGFXS3DControl*)hdl;
return MFX_ERR_NONE;
}
else if (MFX_HANDLE_DEVICEWINDOW == type && hdl != NULL) //for render window handle
{
m_D3DPP.hDeviceWindow = (HWND)hdl;
return MFX_ERR_NONE;
}
return MFX_ERR_UNSUPPORTED;
if (MFX_HANDLE_GFXS3DCONTROL == type && hdl != NULL) {
m_pS3DControl = (IGFXS3DControl *)hdl;
return MFX_ERR_NONE;
} else if (MFX_HANDLE_DEVICEWINDOW == type &&
hdl != NULL) //for render window handle
{
m_D3DPP.hDeviceWindow = (HWND)hdl;
return MFX_ERR_NONE;
}
return MFX_ERR_UNSUPPORTED;
}
mfxStatus CD3D9Device::RenderFrame(mfxFrameSurface1 * pSurface, mfxFrameAllocator * pmfxAlloc)
mfxStatus CD3D9Device::RenderFrame(mfxFrameSurface1 *pSurface,
mfxFrameAllocator *pmfxAlloc)
{
HRESULT hr = S_OK;
HRESULT hr = S_OK;
if (!(1 == m_nViews || (2 == m_nViews && NULL != m_pS3DControl)))
return MFX_ERR_UNDEFINED_BEHAVIOR;
if (!(1 == m_nViews || (2 == m_nViews && NULL != m_pS3DControl)))
return MFX_ERR_UNDEFINED_BEHAVIOR;
MSDK_CHECK_POINTER(pSurface, MFX_ERR_NULL_PTR);
MSDK_CHECK_POINTER(m_pDeviceManager9, MFX_ERR_NOT_INITIALIZED);
MSDK_CHECK_POINTER(pmfxAlloc, MFX_ERR_NULL_PTR);
MSDK_CHECK_POINTER(pSurface, MFX_ERR_NULL_PTR);
MSDK_CHECK_POINTER(m_pDeviceManager9, MFX_ERR_NOT_INITIALIZED);
MSDK_CHECK_POINTER(pmfxAlloc, MFX_ERR_NULL_PTR);
// don't try to render second view if output rect changed since first view
if (2 == m_nViews && (0 != pSurface->Info.FrameId.ViewId))
return MFX_ERR_NONE;
// don't try to render second view if output rect changed since first view
if (2 == m_nViews && (0 != pSurface->Info.FrameId.ViewId))
return MFX_ERR_NONE;
hr = m_pD3DD9->TestCooperativeLevel();
hr = m_pD3DD9->TestCooperativeLevel();
switch (hr)
{
case D3D_OK :
break;
switch (hr) {
case D3D_OK:
break;
case D3DERR_DEVICELOST :
{
return MFX_ERR_DEVICE_LOST;
}
case D3DERR_DEVICELOST: {
return MFX_ERR_DEVICE_LOST;
}
case D3DERR_DEVICENOTRESET :
{
return MFX_ERR_UNKNOWN;
}
case D3DERR_DEVICENOTRESET: {
return MFX_ERR_UNKNOWN;
}
default :
{
return MFX_ERR_UNKNOWN;
}
}
default: {
return MFX_ERR_UNKNOWN;
}
}
CComPtr<IDirect3DSurface9> pBackBuffer;
hr = m_pD3DD9->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &pBackBuffer);
CComPtr<IDirect3DSurface9> pBackBuffer;
hr = m_pD3DD9->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO,
&pBackBuffer);
mfxHDLPair* dxMemId = (mfxHDLPair*)pSurface->Data.MemId;
mfxHDLPair *dxMemId = (mfxHDLPair *)pSurface->Data.MemId;
hr = m_pD3DD9->StretchRect((IDirect3DSurface9*)dxMemId->first, NULL, pBackBuffer, NULL, D3DTEXF_LINEAR);
if (FAILED(hr))
{
return MFX_ERR_UNKNOWN;
}
hr = m_pD3DD9->StretchRect((IDirect3DSurface9 *)dxMemId->first, NULL,
pBackBuffer, NULL, D3DTEXF_LINEAR);
if (FAILED(hr)) {
return MFX_ERR_UNKNOWN;
}
if (SUCCEEDED(hr)&& (1 == m_nViews || pSurface->Info.FrameId.ViewId == 1))
{
hr = m_pD3DD9->Present(NULL, NULL, NULL, NULL);
}
if (SUCCEEDED(hr) &&
(1 == m_nViews || pSurface->Info.FrameId.ViewId == 1)) {
hr = m_pD3DD9->Present(NULL, NULL, NULL, NULL);
}
return SUCCEEDED(hr) ? MFX_ERR_NONE : MFX_ERR_DEVICE_FAILED;
return SUCCEEDED(hr) ? MFX_ERR_NONE : MFX_ERR_DEVICE_FAILED;
}
/*


@@ -10,7 +10,7 @@ Copyright(c) 2011-2014 Intel Corporation. All Rights Reserved.
#pragma once
#if defined( _WIN32 ) || defined ( _WIN64 )
#if defined(_WIN32) || defined(_WIN64)
#include "common_utils.h"
@@ -25,37 +25,34 @@ Copyright(c) 2011-2014 Intel Corporation. All Rights Reserved.
class IGFXS3DControl;
/// Base class for hw device
class CHWDevice
{
class CHWDevice {
public:
virtual ~CHWDevice(){}
/** Initializes device for requested processing.
virtual ~CHWDevice() {}
/** Initializes device for requested processing.
@param[in] hWindow Window handle to bundle device to.
@param[in] nViews Number of views to process.
@param[in] nAdapterNum Number of adapter to use
*/
virtual mfxStatus Init(
mfxHDL hWindow,
mfxU16 nViews,
mfxU32 nAdapterNum) = 0;
/// Reset device.
virtual mfxStatus Reset() = 0;
/// Get handle can be used for MFX session SetHandle calls
virtual mfxStatus GetHandle(mfxHandleType type, mfxHDL *pHdl) = 0;
/** Set handle.
virtual mfxStatus Init(mfxHDL hWindow, mfxU16 nViews,
mfxU32 nAdapterNum) = 0;
/// Reset device.
virtual mfxStatus Reset() = 0;
/// Get handle can be used for MFX session SetHandle calls
virtual mfxStatus GetHandle(mfxHandleType type, mfxHDL *pHdl) = 0;
/** Set handle.
Particular device implementation may require other objects to operate.
*/
virtual mfxStatus SetHandle(mfxHandleType type, mfxHDL hdl) = 0;
virtual mfxStatus RenderFrame(mfxFrameSurface1 * pSurface, mfxFrameAllocator * pmfxAlloc) = 0;
virtual void Close() = 0;
virtual mfxStatus SetHandle(mfxHandleType type, mfxHDL hdl) = 0;
virtual mfxStatus RenderFrame(mfxFrameSurface1 *pSurface,
mfxFrameAllocator *pmfxAlloc) = 0;
virtual void Close() = 0;
};
enum {
MFX_HANDLE_GFXS3DCONTROL = 0x100, /* A handle to the IGFXS3DControl instance */
MFX_HANDLE_DEVICEWINDOW = 0x101 /* A handle to the render window */
}; //mfxHandleType
enum { MFX_HANDLE_GFXS3DCONTROL =
0x100, /* A handle to the IGFXS3DControl instance */
MFX_HANDLE_DEVICEWINDOW = 0x101 /* A handle to the render window */
}; //mfxHandleType
/** Direct3D 9 device implementation.
@note Can be initialized for only 1 or two 2 views. Handle to
@@ -63,56 +60,59 @@ MFX_HANDLE_GFXS3DCONTROL must be set prior if initializing for 2 views.
@note Device always set D3DPRESENT_PARAMETERS::Windowed to TRUE.
*/
class CD3D9Device : public CHWDevice
{
class CD3D9Device : public CHWDevice {
public:
CD3D9Device();
virtual ~CD3D9Device();
CD3D9Device();
virtual ~CD3D9Device();
virtual mfxStatus Init(mfxHDL hWindow, mfxU16 nViews,
mfxU32 nAdapterNum);
virtual mfxStatus Reset();
virtual mfxStatus GetHandle(mfxHandleType type, mfxHDL *pHdl);
virtual mfxStatus SetHandle(mfxHandleType type, mfxHDL hdl);
virtual mfxStatus RenderFrame(mfxFrameSurface1 *pSurface,
mfxFrameAllocator *pmfxAlloc);
virtual void UpdateTitle(double /*fps*/) {}
virtual void Close();
void DefineFormat(bool isA2rgb10)
{
m_bIsA2rgb10 = (isA2rgb10) ? TRUE : FALSE;
}
virtual mfxStatus Init(
mfxHDL hWindow,
mfxU16 nViews,
mfxU32 nAdapterNum);
virtual mfxStatus Reset();
virtual mfxStatus GetHandle(mfxHandleType type, mfxHDL *pHdl);
virtual mfxStatus SetHandle(mfxHandleType type, mfxHDL hdl);
virtual mfxStatus RenderFrame(mfxFrameSurface1 * pSurface, mfxFrameAllocator * pmfxAlloc);
virtual void UpdateTitle(double /*fps*/) { }
virtual void Close() ;
void DefineFormat(bool isA2rgb10) { m_bIsA2rgb10 = (isA2rgb10) ? TRUE : FALSE; }
protected:
mfxStatus CreateVideoProcessors();
bool CheckOverlaySupport();
virtual mfxStatus FillD3DPP(mfxHDL hWindow, mfxU16 nViews, D3DPRESENT_PARAMETERS &D3DPP);
mfxStatus CreateVideoProcessors();
bool CheckOverlaySupport();
virtual mfxStatus FillD3DPP(mfxHDL hWindow, mfxU16 nViews,
D3DPRESENT_PARAMETERS &D3DPP);
private:
IDirect3D9Ex* m_pD3D9;
IDirect3DDevice9Ex* m_pD3DD9;
IDirect3DDeviceManager9* m_pDeviceManager9;
D3DPRESENT_PARAMETERS m_D3DPP;
UINT m_resetToken;
IDirect3D9Ex *m_pD3D9;
IDirect3DDevice9Ex *m_pD3DD9;
IDirect3DDeviceManager9 *m_pDeviceManager9;
D3DPRESENT_PARAMETERS m_D3DPP;
UINT m_resetToken;
mfxU16 m_nViews;
IGFXS3DControl* m_pS3DControl;
mfxU16 m_nViews;
IGFXS3DControl *m_pS3DControl;
D3DSURFACE_DESC m_backBufferDesc;
D3DSURFACE_DESC m_backBufferDesc;
// service required to create video processors
IDirectXVideoProcessorService *m_pDXVAVPS;
//left channel processor
IDirectXVideoProcessor *m_pDXVAVP_Left;
// right channel processor
IDirectXVideoProcessor *m_pDXVAVP_Right;
// service required to create video processors
IDirectXVideoProcessorService* m_pDXVAVPS;
//left channel processor
IDirectXVideoProcessor* m_pDXVAVP_Left;
// right channel processor
IDirectXVideoProcessor* m_pDXVAVP_Right;
// target rectangle
RECT m_targetRect;
// target rectangle
RECT m_targetRect;
// various structures for DXVA2 calls
DXVA2_VideoDesc m_VideoDesc;
DXVA2_VideoProcessBltParams m_BltParams;
DXVA2_VideoSample m_Sample;
// various structures for DXVA2 calls
DXVA2_VideoDesc m_VideoDesc;
DXVA2_VideoProcessBltParams m_BltParams;
DXVA2_VideoSample m_Sample;
BOOL m_bIsA2rgb10;
BOOL m_bIsA2rgb10;
};
#endif // #if defined( _WIN32 ) || defined ( _WIN64 )


@@ -68,7 +68,7 @@ extern struct obs_encoder_info obs_qsv_encoder;
bool obs_module_load(void)
{
mfxIMPL impl = MFX_IMPL_HARDWARE_ANY | MFX_IMPL_VIA_D3D11;
mfxVersion ver = {{0 , 1}};
mfxVersion ver = {{0, 1}};
mfxSession session;
mfxStatus sts;


@@ -68,29 +68,29 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "QSV_Encoder.h"
#include <Windows.h>
#define do_log(level, format, ...) \
#define do_log(level, format, ...) \
blog(level, "[qsv encoder: '%s'] " format, \
obs_encoder_get_name(obsqsv->encoder), ##__VA_ARGS__)
obs_encoder_get_name(obsqsv->encoder), ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
#define warn(format, ...) do_log(LOG_WARNING, format, ##__VA_ARGS__)
#define info(format, ...) do_log(LOG_INFO, format, ##__VA_ARGS__)
#define debug(format, ...) do_log(LOG_DEBUG, format, ##__VA_ARGS__)
/* ------------------------------------------------------------------------- */
struct obs_qsv {
obs_encoder_t *encoder;
obs_encoder_t *encoder;
qsv_param_t params;
qsv_t *context;
qsv_param_t params;
qsv_t *context;
DARRAY(uint8_t) packet_data;
DARRAY(uint8_t) packet_data;
uint8_t *extra_data;
uint8_t *sei;
uint8_t *extra_data;
uint8_t *sei;
size_t extra_data_size;
size_t sei_size;
size_t extra_data_size;
size_t sei_size;
os_performance_token_t *performance_token;
};
@@ -98,12 +98,11 @@ struct obs_qsv {
/* ------------------------------------------------------------------------- */
static CRITICAL_SECTION g_QsvCs;
static unsigned short g_verMajor;
static unsigned short g_verMinor;
static int64_t g_pts2dtsShift;
static int64_t g_prevDts;
static bool g_bFirst;
static unsigned short g_verMajor;
static unsigned short g_verMinor;
static int64_t g_pts2dtsShift;
static int64_t g_prevDts;
static bool g_bFirst;
static const char *obs_qsv_getname(void *type_data)
{
@@ -169,33 +168,32 @@ static inline void add_strings(obs_property_t *list, const char *const *strings)
}
}
#define TEXT_SPEED obs_module_text("TargetUsage")
#define TEXT_TARGET_BITRATE obs_module_text("Bitrate")
#define TEXT_MAX_BITRATE obs_module_text("MaxBitrate")
#define TEXT_PROFILE obs_module_text("Profile")
#define TEXT_ASYNC_DEPTH obs_module_text("AsyncDepth")
#define TEXT_RATE_CONTROL obs_module_text("RateControl")
#define TEXT_ACCURACY obs_module_text("Accuracy")
#define TEXT_CONVERGENCE obs_module_text("Convergence")
#define TEXT_ICQ_QUALITY obs_module_text("ICQQuality")
#define TEXT_LA_DEPTH obs_module_text("LookAheadDepth")
#define TEXT_KEYINT_SEC obs_module_text("KeyframeIntervalSec")
#define TEXT_SPEED obs_module_text("TargetUsage")
#define TEXT_TARGET_BITRATE obs_module_text("Bitrate")
#define TEXT_MAX_BITRATE obs_module_text("MaxBitrate")
#define TEXT_PROFILE obs_module_text("Profile")
#define TEXT_ASYNC_DEPTH obs_module_text("AsyncDepth")
#define TEXT_RATE_CONTROL obs_module_text("RateControl")
#define TEXT_ACCURACY obs_module_text("Accuracy")
#define TEXT_CONVERGENCE obs_module_text("Convergence")
#define TEXT_ICQ_QUALITY obs_module_text("ICQQuality")
#define TEXT_LA_DEPTH obs_module_text("LookAheadDepth")
#define TEXT_KEYINT_SEC obs_module_text("KeyframeIntervalSec")
static bool rate_control_modified(obs_properties_t *ppts, obs_property_t *p,
obs_data_t *settings)
obs_data_t *settings)
{
const char *rate_control = obs_data_get_string(settings, "rate_control");
const char *rate_control =
obs_data_get_string(settings, "rate_control");
bool bVisible =
astrcmpi(rate_control, "VCM") == 0 ||
astrcmpi(rate_control, "VBR") == 0;
bool bVisible = astrcmpi(rate_control, "VCM") == 0 ||
astrcmpi(rate_control, "VBR") == 0;
p = obs_properties_get(ppts, "max_bitrate");
obs_property_set_visible(p, bVisible);
bVisible =
astrcmpi(rate_control, "CQP") == 0 ||
astrcmpi(rate_control, "LA_ICQ") == 0 ||
astrcmpi(rate_control, "ICQ") == 0;
bVisible = astrcmpi(rate_control, "CQP") == 0 ||
astrcmpi(rate_control, "LA_ICQ") == 0 ||
astrcmpi(rate_control, "ICQ") == 0;
p = obs_properties_get(ppts, "bitrate");
obs_property_set_visible(p, !bVisible);
@@ -214,12 +212,12 @@ static bool rate_control_modified(obs_properties_t *ppts, obs_property_t *p,
obs_property_set_visible(p, bVisible);
bVisible = astrcmpi(rate_control, "ICQ") == 0 ||
astrcmpi(rate_control, "LA_ICQ") == 0;
astrcmpi(rate_control, "LA_ICQ") == 0;
p = obs_properties_get(ppts, "icq_quality");
obs_property_set_visible(p, bVisible);
bVisible = astrcmpi(rate_control, "LA_ICQ") == 0 ||
astrcmpi(rate_control, "LA") == 0;
astrcmpi(rate_control, "LA") == 0;
p = obs_properties_get(ppts, "la_depth");
obs_property_set_visible(p, bVisible);
@@ -227,7 +225,7 @@ static bool rate_control_modified(obs_properties_t *ppts, obs_property_t *p,
}
static inline void add_rate_controls(obs_property_t *list,
const struct qsv_rate_control_info *rc)
const struct qsv_rate_control_info *rc)
{
enum qsv_cpu_platform plat = qsv_get_cpu_platform();
while (rc->name) {
@@ -245,36 +243,41 @@ static obs_properties_t *obs_qsv_props(void *unused)
obs_property_t *list;
list = obs_properties_add_list(props, "target_usage", TEXT_SPEED,
OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_STRING);
add_strings(list, qsv_usage_names);
list = obs_properties_add_list(props, "profile", TEXT_PROFILE,
OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_STRING);
add_strings(list, qsv_profile_names);
obs_properties_add_int(props, "keyint_sec", TEXT_KEYINT_SEC, 1, 20, 1);
obs_properties_add_int(props, "async_depth", TEXT_ASYNC_DEPTH, 1, 7, 1);
list = obs_properties_add_list(props, "rate_control", TEXT_RATE_CONTROL,
OBS_COMBO_TYPE_LIST, OBS_COMBO_FORMAT_STRING);
OBS_COMBO_TYPE_LIST,
OBS_COMBO_FORMAT_STRING);
add_rate_controls(list, qsv_ratecontrols);
obs_property_set_modified_callback(list, rate_control_modified);
obs_property_t *p;
p = obs_properties_add_int(props, "bitrate", TEXT_TARGET_BITRATE, 50,
10000000, 50);
10000000, 50);
obs_property_int_set_suffix(p, " Kbps");
p = obs_properties_add_int(props, "max_bitrate", TEXT_MAX_BITRATE, 50,
10000000, 50);
10000000, 50);
obs_property_int_set_suffix(p, " Kbps");
obs_properties_add_int(props, "accuracy", TEXT_ACCURACY, 0, 10000, 1);
obs_properties_add_int(props, "convergence", TEXT_CONVERGENCE, 0, 10, 1);
obs_properties_add_int(props, "convergence", TEXT_CONVERGENCE, 0, 10,
1);
obs_properties_add_int(props, "qpi", "QPI", 1, 51, 1);
obs_properties_add_int(props, "qpp", "QPP", 1, 51, 1);
obs_properties_add_int(props, "qpb", "QPB", 1, 51, 1);
obs_properties_add_int(props, "icq_quality", TEXT_ICQ_QUALITY, 1, 51, 1);
obs_properties_add_int(props, "icq_quality", TEXT_ICQ_QUALITY, 1, 51,
1);
obs_properties_add_int(props, "la_depth", TEXT_LA_DEPTH, 10, 100, 1);
return props;
@@ -285,9 +288,11 @@ static void update_params(struct obs_qsv *obsqsv, obs_data_t *settings)
video_t *video = obs_encoder_video(obsqsv->encoder);
const struct video_output_info *voi = video_output_get_info(video);
const char *target_usage = obs_data_get_string(settings, "target_usage");
const char *target_usage =
obs_data_get_string(settings, "target_usage");
const char *profile = obs_data_get_string(settings, "profile");
const char *rate_control = obs_data_get_string(settings, "rate_control");
const char *rate_control =
obs_data_get_string(settings, "rate_control");
int async_depth = (int)obs_data_get_int(settings, "async_depth");
int target_bitrate = (int)obs_data_get_int(settings, "bitrate");
int max_bitrate = (int)obs_data_get_int(settings, "max_bitrate");
@@ -369,44 +374,39 @@ static void update_params(struct obs_qsv *obsqsv, obs_data_t *settings)
info("settings:\n\trate_control: %s", rate_control);
if (obsqsv->params.nRateControl != MFX_RATECONTROL_LA_ICQ &&
obsqsv->params.nRateControl != MFX_RATECONTROL_ICQ &&
obsqsv->params.nRateControl != MFX_RATECONTROL_ICQ &&
obsqsv->params.nRateControl != MFX_RATECONTROL_CQP)
blog(LOG_INFO,
"\ttarget_bitrate: %d",
(int)obsqsv->params.nTargetBitRate);
blog(LOG_INFO, "\ttarget_bitrate: %d",
(int)obsqsv->params.nTargetBitRate);
if (obsqsv->params.nRateControl == MFX_RATECONTROL_VBR ||
obsqsv->params.nRateControl == MFX_RATECONTROL_VCM)
blog(LOG_INFO,
"\tmax_bitrate: %d",
(int)obsqsv->params.nMaxBitRate);
blog(LOG_INFO, "\tmax_bitrate: %d",
(int)obsqsv->params.nMaxBitRate);
if (obsqsv->params.nRateControl == MFX_RATECONTROL_LA_ICQ ||
obsqsv->params.nRateControl == MFX_RATECONTROL_ICQ)
blog(LOG_INFO,
"\tICQ Quality: %d",
(int)obsqsv->params.nICQQuality);
blog(LOG_INFO, "\tICQ Quality: %d",
(int)obsqsv->params.nICQQuality);
if (obsqsv->params.nRateControl == MFX_RATECONTROL_LA_ICQ ||
obsqsv->params.nRateControl == MFX_RATECONTROL_LA)
blog(LOG_INFO,
"\tLookahead Depth:%d",
(int)obsqsv->params.nLADEPTH);
blog(LOG_INFO, "\tLookahead Depth:%d",
(int)obsqsv->params.nLADEPTH);
if (obsqsv->params.nRateControl == MFX_RATECONTROL_CQP)
blog(LOG_INFO,
"\tqpi: %d\n"
"\tqpb: %d\n"
"\tqpp: %d",
qpi, qpb, qpp);
"\tqpi: %d\n"
"\tqpb: %d\n"
"\tqpp: %d",
qpi, qpb, qpp);
blog(LOG_INFO,
"\tfps_num: %d\n"
"\tfps_den: %d\n"
"\twidth: %d\n"
"\theight: %d",
voi->fps_num, voi->fps_den,
width, height);
"\tfps_num: %d\n"
"\tfps_den: %d\n"
"\twidth: %d\n"
"\theight: %d",
voi->fps_num, voi->fps_den, width, height);
info("debug info:");
}
@@ -448,7 +448,6 @@ static bool obs_qsv_update(void *data, obs_data_t *settings)
bool success = update_settings(obsqsv, settings);
int ret;
if (success) {
EnterCriticalSection(&g_QsvCs);
@@ -486,9 +485,10 @@ static void *obs_qsv_create(obs_data_t *settings, obs_encoder_t *encoder)
qsv_encoder_version(&g_verMajor, &g_verMinor);
blog(LOG_INFO, "\tmajor: %d\n"
"\tminor: %d",
g_verMajor, g_verMinor);
blog(LOG_INFO,
"\tmajor: %d\n"
"\tminor: %d",
g_verMajor, g_verMinor);
// MSDK 1.6 or less doesn't have automatic DTS calculation
// including early SandyBridge.
@@ -496,17 +496,17 @@ static void *obs_qsv_create(obs_data_t *settings, obs_encoder_t *encoder)
if (g_verMajor == 1 && g_verMinor < 7) {
int64_t interval = obsqsv->params.nbFrames + 1;
int64_t GopPicSize = (int64_t)(obsqsv->params.nKeyIntSec *
obsqsv->params.nFpsNum /
(float)obsqsv->params.nFpsDen);
g_pts2dtsShift = GopPicSize - (GopPicSize / interval) *
interval;
obsqsv->params.nFpsNum /
(float)obsqsv->params.nFpsDen);
g_pts2dtsShift =
GopPicSize - (GopPicSize / interval) * interval;
blog(LOG_INFO, "\tinterval: %d\n"
"\tGopPictSize: %d\n"
"\tg_pts2dtsShift: %d",
interval, GopPicSize, g_pts2dtsShift);
}
else
blog(LOG_INFO,
"\tinterval: %d\n"
"\tGopPictSize: %d\n"
"\tg_pts2dtsShift: %d",
interval, GopPicSize, g_pts2dtsShift);
} else
g_pts2dtsShift = -1;
if (!obsqsv->context) {
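Editor's note on the reflowed hunk above: for Media SDK builds older than 1.7, which cannot produce DTS themselves, the plugin precomputes a fixed pts-to-dts shift. Reading the visible code, interval appears to be the B-frame distance (nbFrames + 1) and GopPicSize the keyframe interval expressed in frames, so GopPicSize - (GopPicSize / interval) * interval is simply GopPicSize modulo interval. A self-contained sketch of that arithmetic with made-up numbers (the concrete values are assumptions, not taken from the diff):

/* Illustrative arithmetic only; variable meanings are inferred from the
 * hunk above and the concrete values are assumed. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t nbFrames = 3;            /* assumed consecutive B-frames */
	int64_t interval = nbFrames + 1; /* 4 */
	int64_t GopPicSize =
		(int64_t)(2 * 30000 / (float)1001); /* ~2 s at 29.97 fps -> 59 */

	/* Same expression as in the hunk: GopPicSize modulo interval. */
	int64_t shift = GopPicSize - (GopPicSize / interval) * interval;

	printf("interval=%" PRId64 ", GopPicSize=%" PRId64 ", shift=%" PRId64 "\n",
	       interval, GopPicSize, shift); /* prints shift=3 */
	return 0;
}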
@@ -514,8 +514,7 @@ static void *obs_qsv_create(obs_data_t *settings, obs_encoder_t *encoder)
return NULL;
}
obsqsv->performance_token =
os_request_high_performance("qsv encoding");
obsqsv->performance_token = os_request_high_performance("qsv encoding");
g_bFirst = true;
@@ -534,7 +533,7 @@ static bool obs_qsv_extra_data(void *data, uint8_t **extra_data, size_t *size)
return true;
}
static bool obs_qsv_sei(void *data, uint8_t **sei,size_t *size)
static bool obs_qsv_sei(void *data, uint8_t **sei, size_t *size)
{
struct obs_qsv *obsqsv = data;
@@ -556,7 +555,7 @@ static inline bool valid_format(enum video_format format)
}
static inline void cap_resolution(obs_encoder_t *encoder,
struct video_scale_info *info)
struct video_scale_info *info)
{
enum qsv_cpu_platform qsv_platform = qsv_get_cpu_platform();
uint32_t width = obs_encoder_get_width(encoder);
@@ -584,15 +583,17 @@ static void obs_qsv_video_info(void *data, struct video_scale_info *info)
pref_format = obs_encoder_get_preferred_video_format(obsqsv->encoder);
if (!valid_format(pref_format)) {
pref_format = valid_format(info->format) ?
info->format : VIDEO_FORMAT_NV12;
pref_format = valid_format(info->format) ? info->format
: VIDEO_FORMAT_NV12;
}
info->format = pref_format;
cap_resolution(obsqsv->encoder, info);
}
static void parse_packet(struct obs_qsv *obsqsv, struct encoder_packet *packet, mfxBitstream *pBS, uint32_t fps_num, bool *received_packet)
static void parse_packet(struct obs_qsv *obsqsv, struct encoder_packet *packet,
mfxBitstream *pBS, uint32_t fps_num,
bool *received_packet)
{
uint8_t *start, *end;
int type;
@@ -604,23 +605,24 @@ static void parse_packet(struct obs_qsv *obsqsv, struct encoder_packet *packet,
da_resize(obsqsv->packet_data, 0);
da_push_back_array(obsqsv->packet_data, &pBS->Data[pBS->DataOffset],
pBS->DataLength);
pBS->DataLength);
packet->data = obsqsv->packet_data.array;
packet->size = obsqsv->packet_data.num;
packet->type = OBS_ENCODER_VIDEO;
packet->pts = pBS->TimeStamp * fps_num / 90000;
packet->keyframe = (pBS->FrameType &
(MFX_FRAMETYPE_I | MFX_FRAMETYPE_REF));
packet->keyframe =
(pBS->FrameType & (MFX_FRAMETYPE_I | MFX_FRAMETYPE_REF));
/* ------------------------------------ */
start = obsqsv->packet_data.array;
end = start + obsqsv->packet_data.num;
start = (uint8_t*)obs_avc_find_startcode(start, end);
start = (uint8_t *)obs_avc_find_startcode(start, end);
while (true) {
while (start < end && !*(start++));
while (start < end && !*(start++))
;
if (start == end)
break;
@@ -638,7 +640,7 @@ static void parse_packet(struct obs_qsv *obsqsv, struct encoder_packet *packet,
start[0] |= prev_type << 5;
}
start = (uint8_t*)obs_avc_find_startcode(start, end);
start = (uint8_t *)obs_avc_find_startcode(start, end);
}
/* ------------------------------------ */
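Editor's note for readers following the NAL walk above: obs_avc_find_startcode() is a libobs helper, and the empty while loop that follows it skips the zero bytes of each Annex-B start code before the NAL header byte is touched (where start[0] |= prev_type << 5 appears to raise the nal_ref_idc priority bits). A rough stand-in for the helper, assuming it returns the position of the next 00 00 01 pattern or end when no further start code exists:

/* Hypothetical stand-in for obs_avc_find_startcode(); the real helper lives
 * in libobs and its exact contract may differ. */
#include <stdint.h>

static uint8_t *find_startcode(uint8_t *p, uint8_t *end)
{
	for (; p + 3 <= end; p++) {
		if (p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x01)
			return p; /* first byte of the 00 00 01 sequence */
	}
	return end; /* no start code left in [p, end) */
}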
@@ -651,8 +653,7 @@ static void parse_packet(struct obs_qsv *obsqsv, struct encoder_packet *packet,
// In case MSDK doesn't support automatic DecodeTimeStamp, do manual
// calculation
if (g_pts2dtsShift >= 0)
{
if (g_pts2dtsShift >= 0) {
if (g_bFirst) {
packet->dts = packet->pts - 3 * obsqsv->params.nFpsDen;
} else if (pFrame) {
@@ -681,7 +682,7 @@ static void parse_packet(struct obs_qsv *obsqsv, struct encoder_packet *packet,
}
static bool obs_qsv_encode(void *data, struct encoder_frame *frame,
struct encoder_packet *packet, bool *received_packet)
struct encoder_packet *packet, bool *received_packet)
{
struct obs_qsv *obsqsv = data;
@@ -690,7 +691,6 @@ static bool obs_qsv_encode(void *data, struct encoder_frame *frame,
EnterCriticalSection(&g_QsvCs);
video_t *video = obs_encoder_video(obsqsv->encoder);
const struct video_output_info *voi = video_output_get_info(video);
@@ -703,17 +703,13 @@ static bool obs_qsv_encode(void *data, struct encoder_frame *frame,
// FIXME: remove null check from the top of this function
// if we actually do expect null frames to complete output.
if (frame)
ret = qsv_encoder_encode(
obsqsv->context,
qsvPTS,
frame->data[0], frame->data[1], frame->linesize[0],
frame->linesize[1],
&pBS);
ret = qsv_encoder_encode(obsqsv->context, qsvPTS,
frame->data[0], frame->data[1],
frame->linesize[0], frame->linesize[1],
&pBS);
else
ret = qsv_encoder_encode(
obsqsv->context,
qsvPTS,
NULL, NULL, 0, 0, &pBS);
ret = qsv_encoder_encode(obsqsv->context, qsvPTS, NULL, NULL, 0,
0, &pBS);
if (ret < 0) {
warn("encode failed");
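Editor's note on the reflowed qsv_encoder_encode() call above: the preferred-format fallback earlier in this diff is VIDEO_FORMAT_NV12, which suggests NV12 input, so frame->data[0]/linesize[0] would describe the full-height luma plane and frame->data[1]/linesize[1] the half-height interleaved chroma plane. A tiny sketch of the plane sizes implied by that layout, using assumed dimensions and strides:

/* NV12 plane bookkeeping; 1920x1080 with a 2048-byte stride is an assumed
 * example, not a value taken from the diff. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t height = 1080;
	size_t stride_y = 2048;  /* linesize[0] */
	size_t stride_uv = 2048; /* linesize[1] */

	size_t y_bytes = stride_y * height;         /* full-height Y plane */
	size_t uv_bytes = stride_uv * (height / 2); /* interleaved U/V plane */

	printf("Y: %zu bytes, UV: %zu bytes\n", y_bytes, uv_bytes);
	return 0;
}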
@@ -741,5 +737,5 @@ struct obs_encoder_info obs_qsv_encoder = {
.get_defaults = obs_qsv_defaults,
.get_extra_data = obs_qsv_extra_data,
.get_sei_data = obs_qsv_sei,
.get_video_info = obs_qsv_video_info
.get_video_info = obs_qsv_video_info,
};