#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0602
#define MF_TIMEBASE (AVRational){1, 10000000}
#define MF_INVALID_TIME AV_NOPTS_VALUE
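// Media Foundation timestamps are in 100 ns units, hence the 1/10000000
// timebase above. The loop below services the event queue of an asynchronous
// MFT: it blocks on the event generator until the transform reports that it
// needs input, has output ready, has finished draining, or hit a marker.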
    while (!(c->async_need_input || c->async_have_output ||
             c->draining_done || c->async_marker)) {
        IMFMediaEvent *ev = NULL;
        MediaEventType ev_id = 0;
        HRESULT hr = IMFMediaEventGenerator_GetEvent(c->async_events, 0, &ev);

        IMFMediaEvent_GetType(ev, &ev_id);
            c->async_need_input = 1;   // on METransformNeedInput
            c->async_have_output = 1;  // on METransformHaveOutput

        IMFMediaEvent_Release(ev);
    IMFSample_SetSampleTime(sample, stime);

    HRESULT hr = IMFSample_GetSampleTime(sample, &pts);
    hr = IMFAttributes_GetBlobSize(type, &MF_MT_USER_DATA, &sz);
    if (!FAILED(hr) && sz > 0) {

    if (!c->out_info.cbSize && !c->out_stream_provides_samples) {
        hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &sz);
               "assuming %d bytes instead.\n", (int)sz);
        c->out_info.cbSize = sz;

    hr = IMFAttributes_GetBlobSize(type, &MF_MT_MPEG_SEQUENCE_HEADER, &sz);
    if (!FAILED(hr) && sz > 0) {
        hr = IMFAttributes_GetBlob(type, &MF_MT_MPEG_SEQUENCE_HEADER, extradata, sz, NULL);
    hr = IMFTransform_GetOutputCurrentType(c->mft, c->out_stream_id, &type);

    } else if (c->is_audio) {

    IMFMediaType_Release(type);
    hr = IMFSample_GetTotalLength(sample, &len);

    IMFMediaBuffer_Release(buffer);

    IMFMediaBuffer_Unlock(buffer);
    IMFMediaBuffer_Release(buffer);
    hr = IMFAttributes_GetUINT32(sample, &MFSampleExtension_CleanPoint, &t32);
    if (c->is_audio || (!FAILED(hr) && t32 != 0))

    hr = IMFAttributes_GetUINT64(sample, &MFSampleExtension_DecodeTimestamp, &t);

        c->reorder_delay = avpkt->pts - avpkt->dts;
    avpkt->dts -= c->reorder_delay;
    avpkt->pts -= c->reorder_delay;
                                   c->in_info.cbAlignment);

                                   c->in_info.cbAlignment);

    IMFSample_Release(sample);

    IMFMediaBuffer_Release(buffer);
    IMFSample_Release(sample);

    IMFMediaBuffer_SetCurrentLength(buffer, size);
    IMFMediaBuffer_Unlock(buffer);
    IMFMediaBuffer_Release(buffer);

    IMFSample_Release(sample);
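// Feeding the transform: with an asynchronous MFT, ProcessInput() may only be
// called after a METransformNeedInput event (tracked in c->async_need_input).
// MF_E_NOTACCEPTING means "try again later"; passing no sample switches the
// code into draining mode via MFT_MESSAGE_COMMAND_DRAIN.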
    if (c->async_events) {
        if (!c->async_need_input)

    IMFSample_SetUINT32(sample, &MFSampleExtension_Discontinuity, TRUE);

    hr = IMFTransform_ProcessInput(c->mft, c->in_stream_id, sample, 0);
    if (hr == MF_E_NOTACCEPTING) {
    } else if (FAILED(hr)) {

        c->async_need_input = 0;
    } else if (!c->draining) {
        hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_COMMAND_DRAIN, 0);

        c->async_need_input = 0;
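// Retrieving output: unless the output stream allocates its own samples
// (MFT_OUTPUT_STREAM_PROVIDES_SAMPLES), a buffer of cbSize bytes aligned to
// cbAlignment is allocated and handed to ProcessOutput().
// MF_E_TRANSFORM_NEED_MORE_INPUT while draining marks draining as done;
// MF_E_TRANSFORM_STREAM_CHANGE forces output type renegotiation.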
    MFT_OUTPUT_DATA_BUFFER out_buffers;

    if (c->async_events) {

    if (!c->async_have_output || c->draining_done) {

    if (!c->out_stream_provides_samples) {
                                       c->out_info.cbAlignment);

    out_buffers = (MFT_OUTPUT_DATA_BUFFER) {
        .dwStreamID = c->out_stream_id,

    hr = IMFTransform_ProcessOutput(c->mft, 0, 1, &out_buffers, &st);

    if (out_buffers.pEvents)
        IMFCollection_Release(out_buffers.pEvents);

    *out_sample = out_buffers.pSample;

    if (out_buffers.pSample)
        IMFSample_Release(out_buffers.pSample);

    if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
        c->draining_done = 1;
    } else if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {

    c->async_have_output = 0;

    c->async_have_output = 0;

    if (ret >= 0 && !*out_sample)
    if (!c->frame->buf[0]) {

    if (c->frame->buf[0]) {

    if (c->is_video && c->codec_api) {
        ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame,
                           FF_VAL_VT_UI4(1));

    IMFSample_Release(sample);

    IMFSample_Release(sample);
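// Output type scoring (audio): each candidate IMFMediaType is compared
// against the requested sample rate, channel count and main subtype, and the
// distance of MF_MT_AUDIO_AVG_BYTES_PER_SECOND from the requested bitrate is
// folded into the score. A non-zero MF_MT_AAC_PAYLOAD_TYPE (ADTS/LOAS rather
// than raw AAC) disqualifies the candidate.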
    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);

    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);

    hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
    if (IsEqualGUID(&c->main_subtype, &tg))

    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &t);

        score |= (1LL << 31) - diff;
        score |= (1LL << 30) + diff;

    hr = IMFAttributes_GetUINT32(type, &MF_MT_AAC_PAYLOAD_TYPE, &t);
    if (!FAILED(hr) && t != 0)
    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);

    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);

    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);

    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);

    hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
    if (IsEqualGUID(&c->main_subtype, &tg))
    IMFAttributes_SetUINT32(type, &MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);

    IMFAttributes_SetUINT32(type, &MF_MT_MPEG2_PROFILE, profile);

    IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);

    if (c->opt_enc_rc >= 0)
        ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonRateControlMode,
                           FF_VAL_VT_UI4(c->opt_enc_rc));

    if (c->opt_enc_quality >= 0)
        ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonQuality,
                           FF_VAL_VT_UI4(c->opt_enc_quality));

        ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncH264CABACEnable,
                           FF_VAL_VT_BOOL(1));

    if (c->opt_enc_scenario >= 0)
        ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVScenarioInfo,
                           FF_VAL_VT_UI4(c->opt_enc_scenario));
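// Output type negotiation: GetOutputAvailableType() is enumerated until
// MF_E_NO_MORE_TYPES, each candidate is scored, the best one is kept (with an
// extra reference) and finally applied with SetOutputType().
// MF_E_TRANSFORM_TYPE_NOT_SET means the other direction has to be configured
// first, which is why the caller alternates between input and output setup.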
    IMFMediaType *out_type = NULL;
    int64_t out_type_score = -1;
    int out_type_index = -1;

        hr = IMFTransform_GetOutputAvailableType(c->mft, c->out_stream_id, n, &type);
        if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
        if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {

        } else if (c->is_audio) {

        if (score > out_type_score) {
                IMFMediaType_Release(out_type);
            out_type_score = score;
            IMFMediaType_AddRef(out_type);

        IMFMediaType_Release(type);

        hr = c->functions.MFCreateMediaType(&out_type);

    } else if (c->is_audio) {

    hr = IMFTransform_SetOutputType(c->mft, c->out_stream_id, out_type, 0);

    } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {

    IMFMediaType_Release(out_type);
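// Input type negotiation mirrors the output side: available input types are
// enumerated, scored (sample rate, channel count, matching subtype), and the
// best candidate is applied with SetInputType().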
    IMFMediaType *in_type = NULL;
    int64_t in_type_score = -1;
    int in_type_index = -1;

        hr = IMFTransform_GetInputAvailableType(c->mft, c->in_stream_id, n, &type);
        if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
        if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {

        } else if (c->is_audio) {

        if (score > in_type_score) {
                IMFMediaType_Release(in_type);
            in_type_score = score;
            IMFMediaType_AddRef(in_type);

        IMFMediaType_Release(type);

    } else if (c->is_audio) {

    hr = IMFTransform_SetInputType(c->mft, c->in_stream_id, in_type, 0);

    } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {

    IMFMediaType_Release(in_type);
    for (n = 0; n < 2 && (need_input || need_output); n++) {

        need_input = ret < 1;
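// After both types are set, the input/output stream infos are queried so the
// encoder knows the required buffer size (cbSize) and alignment (cbAlignment),
// and whether the output stream allocates its own samples.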
    hr = IMFTransform_GetInputStreamInfo(c->mft, c->in_stream_id, &c->in_info);

           (int)c->in_info.cbSize, (int)c->in_info.cbAlignment);

    hr = IMFTransform_GetOutputStreamInfo(c->mft, c->out_stream_id, &c->out_info);

    c->out_stream_provides_samples =
        (c->out_info.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) ||
        (c->out_info.dwFlags & MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES);

           (int)c->out_info.cbSize, (int)c->out_info.cbAlignment,
           c->out_stream_provides_samples ? " (provides samples)" : "");
    IMFAttributes *attrs;

    if (!(c->is_video && c->opt_enc_hw))

    hr = IMFTransform_GetAttributes(c->mft, &attrs);

    hr = IMFAttributes_GetUINT32(attrs, &MF_TRANSFORM_ASYNC, &v);

    hr = IMFAttributes_SetUINT32(attrs, &MF_TRANSFORM_ASYNC_UNLOCK, TRUE);

    hr = IMFTransform_QueryInterface(c->mft, &IID_IMFMediaEventGenerator,
                                     (void **)&c->async_events);

    IMFAttributes_Release(attrs);
                     const AVCodec *codec, int use_hw)

    MFT_REGISTER_TYPE_INFO reg = {0};

    reg.guidSubtype = *subtype;

        reg.guidMajorType = MFMediaType_Audio;
        category          = MFT_CATEGORY_AUDIO_ENCODER;

        reg.guidMajorType = MFMediaType_Video;
        category          = MFT_CATEGORY_VIDEO_ENCODER;

    c->is_video = !c->is_audio;

    if (c->is_video && c->opt_enc_hw)

    c->main_subtype = *subtype;

    hr = IMFTransform_QueryInterface(c->mft, &IID_ICodecAPI, (void **)&c->codec_api);

    hr = IMFTransform_GetStreamIDs(c->mft, 1, &c->in_stream_id, 1, &c->out_stream_id);
    if (hr == E_NOTIMPL) {
        c->in_stream_id = c->out_stream_id = 0;
    } else if (FAILED(hr)) {

    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);

    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);
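// Some hardware encoders produce the codec extradata only after streaming has
// started, so the loop below polls in 10 ms steps for up to 70 ms and then
// logs whether extradata was obtained.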
    int sleep = 10000, total = 0;

    while (total < 70*1000) {

           avctx->extradata ? "Got" : "Didn't get", total / 1000);
#define LOAD_MF_FUNCTION(context, func_name) \
    context->functions.func_name = (void *)dlsym(context->library, #func_name); \
    if (!context->functions.func_name) { \
        av_log(context, AV_LOG_ERROR, "DLL mfplat.dll failed to find function "\
        return AVERROR_UNKNOWN; \

#define LOAD_MF_FUNCTION(context, func_name) \
    context->functions.func_name = func_name; \
    if (!context->functions.func_name) { \
        av_log(context, AV_LOG_ERROR, "Failed to find function " #func_name \
        return AVERROR_UNKNOWN; \

    c->library = dlopen("mfplat.dll", 0);
    ICodecAPI_Release(c->codec_api);

    if (c->async_events)
        IMFMediaEventGenerator_Release(c->async_events);

    dlclose(c->library);
#define OFFSET(x) offsetof(MFContext, x)

#define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, FMTS, CAPS) \
    static const AVClass ff_ ## NAME ## _mf_encoder_class = { \
        .class_name = #NAME "_mf", \
        .item_name  = av_default_item_name, \
        .version    = LIBAVUTIL_VERSION_INT, \
    const FFCodec ff_ ## NAME ## _mf_encoder = { \
        .p.priv_class   = &ff_ ## NAME ## _mf_encoder_class, \
        .p.name         = #NAME "_mf", \
        .p.long_name    = NULL_IF_CONFIG_SMALL(#ID " via MediaFoundation"), \
        .p.type         = AVMEDIA_TYPE_ ## MEDIATYPE, \
        .p.id           = AV_CODEC_ID_ ## ID, \
        .priv_data_size = sizeof(MFContext), \
        .close          = mf_close, \
        FF_CODEC_RECEIVE_PACKET_CB(mf_receive_packet), \
        .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | \
                          FF_CODEC_CAP_INIT_CLEANUP, \

        .p.sample_fmts  = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, \
                                                         AV_SAMPLE_FMT_NONE },

        .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
                          AV_CODEC_CAP_DR1 | AV_CODEC_CAP_VARIABLE_FRAME_SIZE,
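// MF_ENCODER() expands to one AVClass plus one FFCodec per wrapped codec. The
// file instantiates it along the lines of (illustrative only, argument values
// assumed): MF_ENCODER(AUDIO, aac, AAC, NULL, AFMTS, ACAPS);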
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

    {"rate_control", "Select rate control mode", OFFSET(opt_enc_rc),
     AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, "rate_control"},
    {"default", "Default mode", 0,
     AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, "rate_control"},

    {"scenario", "Select usage scenario", OFFSET(opt_enc_scenario),
     AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, "scenario"},
    {"default", "Default scenario", 0,
     AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, "scenario"},
1285 .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
1286 AV_PIX_FMT_YUV420P, \
1289 .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \