[Libav-user] Video conversion issue

Beere, Viswanath viswanath.beere at capgemini.com
Wed Feb 28 11:40:19 EET 2018


Hi Team,

Thank you very much for developing and supporting this great FFmpeg library.
I am an iOS developer. I am using the FFmpeg libraries for video conversion, i.e. converting MPG files to high-quality MP4. I am getting the error below in the debug console, and the line **ret = avcodec_open2(enc_ctx, encoder, NULL);** in the open_output_file method of the code below returns an error (a negative value).
Could you please help with this issue?
Thank you very much in advance.


Code:

———————————————————————————————————————————————————————————————————————————————————————————————————————

//

#import "VideoTranscodeObj.h"

#include <libavcodec/avcodec.h>

#include <libavformat/avformat.h>

#include <libavfilter/buffersink.h>

#include <libavfilter/buffersrc.h>

#include <libavutil/opt.h>

#include <libavutil/pixdesc.h>


#import "VideoTranscodeH264.h"


@implementation VideoTranscodeH264


static AVFormatContext *ifmt_ctx;

static AVFormatContext *ofmt_ctx;

typedef struct FilteringContext {

    AVFilterContext *buffersink_ctx;

    AVFilterContext *buffersrc_ctx;

    AVFilterGraph *filter_graph;

} FilteringContext;

static FilteringContext *filter_ctx;

typedef struct StreamContext {

    AVCodecContext *dec_ctx;

    AVCodecContext *enc_ctx;

} StreamContext;

static StreamContext *stream_ctx;

static int open_input_file(const char *filename)

{

    int ret;

    unsigned int i;

    ifmt_ctx = NULL;

    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");

        return ret;

    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");

        return ret;

    }

    stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));

    if (!stream_ctx)

        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {

        AVStream *stream = ifmt_ctx->streams[i];

        AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);

        AVCodecContext *codec_ctx;

        if (!dec) {

            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);

            return AVERROR_DECODER_NOT_FOUND;

        }

        codec_ctx = avcodec_alloc_context3(dec);

        if (!codec_ctx) {

            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);

            return AVERROR(ENOMEM);

        }

        ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "

                   "for stream #%u\n", i);

            return ret;

        }

        /* Reencode video & audio and remux subtitles etc. */

        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO

            || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {

            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)

                codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);

            /* Open decoder */

            ret = avcodec_open2(codec_ctx, dec, NULL);

            if (ret < 0) {

                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);

                return ret;

            }

        }

        stream_ctx[i].dec_ctx = codec_ctx;

    }

    av_dump_format(ifmt_ctx, 0, filename, 0);

    return 0;

}

static int open_output_file(const char *filename)

{

    AVStream *out_stream;

    AVStream *in_stream;

    AVCodecContext *dec_ctx, *enc_ctx = NULL;

    AVCodec *encoder;

    int ret;

    unsigned int i;

    ofmt_ctx = NULL;

    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);

    if (!ofmt_ctx) {

        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");

        return AVERROR_UNKNOWN;

    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {

        out_stream = avformat_new_stream(ofmt_ctx, NULL);

        if (!out_stream) {

            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");

            return AVERROR_UNKNOWN;

        }

        in_stream = ifmt_ctx->streams[i];

        dec_ctx = stream_ctx[i].dec_ctx;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO

            || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {

            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {



                encoder = avcodec_find_encoder(AV_CODEC_ID_H264);

                if (!encoder) {

                    av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");

                    return AVERROR_INVALIDDATA;

                }
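
                /* NOTE: judging from the debug console output below, this lookup
                 * resolves to the h264_videotoolbox encoder on the iOS build
                 * (not libx264), so the libx264-oriented settings further down
                 * ("preset", qmin/qmax) may not be honored by it. */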


                enc_ctx = avcodec_alloc_context3(encoder);

                if (!enc_ctx) {

                    av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");

                    return AVERROR(ENOMEM);

                }


                enc_ctx->height = dec_ctx->height;

                enc_ctx->width = dec_ctx->width;

                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;

                /* take first format from list of supported formats */

                enc_ctx->pix_fmt = encoder->pix_fmts[0];

                /* video time_base can be set to whatever is handy and supported by encoder */

                enc_ctx->time_base = dec_ctx->time_base;

                //                enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;

                enc_ctx->profile = FF_PROFILE_H264_BASELINE;

                enc_ctx->level = 40;

                enc_ctx->bit_rate = dec_ctx->bit_rate;
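
                /* NOTE: the bit rate here is simply inherited from the source
                 * (mpeg4) stream; the debug console output below shows
                 * "[h264_videotoolbox] Error setting max bitrate property: -12902"
                 * during avcodec_open2(), so the encoder seems to reject one of
                 * the rate-control settings, though I am not sure which one. */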

                //                enc_ctx->gop_size = 12;

                /* slice threading is requested via thread_type, not thread_count */
                enc_ctx->thread_type = FF_THREAD_SLICE;

                av_opt_set(enc_ctx->priv_data, "preset", "slow", 0);

                //                av_opt_set(enc_ctx->priv_data, "vprofile", "main", AV_OPT_SEARCH_CHILDREN);

                //                enc_ctx->me_range = 16;

                enc_ctx->qmin = 18;

                enc_ctx->qmax = 24;

            } else {

                encoder = avcodec_find_encoder(dec_ctx->codec_id);

                if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO && !encoder) {

                    encoder = avcodec_find_encoder(AV_CODEC_ID_MP2);

                }

                if (!encoder) {

                    av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");

                    return AVERROR_INVALIDDATA;

                }

                /* allocate a fresh encoder context for the audio stream as well */

                enc_ctx = avcodec_alloc_context3(encoder);

                if (!enc_ctx) {

                    av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");

                    return AVERROR(ENOMEM);

                }

                enc_ctx->sample_rate = dec_ctx->sample_rate;

                enc_ctx->channel_layout = dec_ctx->channel_layout;

                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);

                /* take first format from list of supported formats */

                if (encoder->sample_fmts)

                    enc_ctx->sample_fmt = encoder->sample_fmts[0];

                enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};

            }

            /* the global-header flag must be set before avcodec_open2() */

            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)

                enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

            /* Third parameter can be used to pass settings to encoder */

            ret = avcodec_open2(enc_ctx, encoder, NULL);

            if (ret < 0) {

                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);

                return ret;

            }

            ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);

            if (ret < 0) {

                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);

                return ret;

            }

            out_stream->time_base = enc_ctx->time_base;

            stream_ctx[i].enc_ctx = enc_ctx;

        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {

            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);

            return AVERROR_INVALIDDATA;

        } else {

            /* if this stream must be remuxed */

            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);

            if (ret < 0) {

                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);

                return ret;

            }

            out_stream->time_base = in_stream->time_base;

        }

    }

    av_dump_format(ofmt_ctx, 0, filename, 1);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {

        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);

            return ret;

        }

    }

    /* init muxer, write output file header */

    ret = avformat_write_header(ofmt_ctx, NULL);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");

        return ret;

    }

    return 0;

}

static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,

                       AVCodecContext *enc_ctx, const char *filter_spec)

{

    char args[512];

    int ret = 0;

    const AVFilter *buffersrc = NULL;

    const AVFilter *buffersink = NULL;

    AVFilterContext *buffersrc_ctx = NULL;

    AVFilterContext *buffersink_ctx = NULL;

    AVFilterInOut *outputs = avfilter_inout_alloc();

    AVFilterInOut *inputs  = avfilter_inout_alloc();

    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {

        ret = AVERROR(ENOMEM);

        goto end;

    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {

        buffersrc = avfilter_get_by_name("buffer");

        buffersink = avfilter_get_by_name("buffersink");

        if (!buffersrc || !buffersink) {

            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");

            ret = AVERROR_UNKNOWN;

            goto end;

        }

        snprintf(args, sizeof(args),

                 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",

                 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,

                 dec_ctx->time_base.num, dec_ctx->time_base.den,

                 dec_ctx->sample_aspect_ratio.num,

                 dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",

                                           args, NULL, filter_graph);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");

            goto end;

        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",

                                           NULL, NULL, filter_graph);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");

            goto end;

        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",

                             (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),

                             AV_OPT_SEARCH_CHILDREN);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");

            goto end;

        }

    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {

        buffersrc = avfilter_get_by_name("abuffer");

        buffersink = avfilter_get_by_name("abuffersink");

        if (!buffersrc || !buffersink) {

            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");

            ret = AVERROR_UNKNOWN;

            goto end;

        }

        if (!dec_ctx->channel_layout)

            dec_ctx->channel_layout =

            av_get_default_channel_layout(dec_ctx->channels);

        snprintf(args, sizeof(args),

                 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,

                 dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,

                 av_get_sample_fmt_name(dec_ctx->sample_fmt),

                 dec_ctx->channel_layout);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",

                                           args, NULL, filter_graph);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");

            goto end;

        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",

                                           NULL, NULL, filter_graph);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");

            goto end;

        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",

                             (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),

                             AV_OPT_SEARCH_CHILDREN);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");

            goto end;

        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",

                             (uint8_t*)&enc_ctx->channel_layout,

                             sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");

            goto end;

        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",

                             (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),

                             AV_OPT_SEARCH_CHILDREN);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");

            goto end;

        }

    } else {

        ret = AVERROR_UNKNOWN;

        goto end;

    }

    /* Endpoints for the filter graph. */

    outputs->name       = av_strdup("in");

    outputs->filter_ctx = buffersrc_ctx;

    outputs->pad_idx    = 0;

    outputs->next       = NULL;

    inputs->name       = av_strdup("out");

    inputs->filter_ctx = buffersink_ctx;

    inputs->pad_idx    = 0;

    inputs->next       = NULL;

    if (!outputs->name || !inputs->name) {

        ret = AVERROR(ENOMEM);

        goto end;

    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,

                                        &inputs, &outputs, NULL)) < 0)

        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)

        goto end;

    /* Fill FilteringContext */

    fctx->buffersrc_ctx = buffersrc_ctx;

    fctx->buffersink_ctx = buffersink_ctx;

    fctx->filter_graph = filter_graph;

end:

    avfilter_inout_free(&inputs);

    avfilter_inout_free(&outputs);

    return ret;

}

static int init_filters(void)

{

    const char *filter_spec;

    unsigned int i;

    int ret;

    filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));

    if (!filter_ctx)

        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {

        filter_ctx[i].buffersrc_ctx  = NULL;

        filter_ctx[i].buffersink_ctx = NULL;

        filter_ctx[i].filter_graph   = NULL;

        if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO

              || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))

            continue;

        if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)

            filter_spec = "null"; /* passthrough (dummy) filter for video */

        else

            filter_spec = "anull"; /* passthrough (dummy) filter for audio */

        ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,

                          stream_ctx[i].enc_ctx, filter_spec);

        if (ret)

            return ret;

    }

    return 0;

}

static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {

    int ret;

    int got_frame_local;

    AVPacket enc_pkt;

    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =

    (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==

     AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)

        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");

    /* encode filtered frame */

    enc_pkt.data = NULL;

    enc_pkt.size = 0;

    av_init_packet(&enc_pkt);

    ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,

                   filt_frame, got_frame);

    av_frame_free(&filt_frame);

    if (ret < 0)

        return ret;

    if (!(*got_frame))

        return 0;

    /* prepare packet for muxing */

    enc_pkt.stream_index = stream_index;

    av_packet_rescale_ts(&enc_pkt,

                         stream_ctx[stream_index].enc_ctx->time_base,

                         ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");

    /* mux encoded frame */

    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);

    return ret;

}

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)

{

    int ret;

    AVFrame *filt_frame;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");

    /* push the decoded frame into the filtergraph */

    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,

                                       frame, 0);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");

        return ret;

    }

    /* pull filtered frames from the filtergraph */

    while (1) {

        filt_frame = av_frame_alloc();

        if (!filt_frame) {

            ret = AVERROR(ENOMEM);

            break;

        }

        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");

        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,

                                      filt_frame);

        if (ret < 0) {

            /* if no more frames for output - returns AVERROR(EAGAIN)

             * if flushed and no more frames for output - returns AVERROR_EOF

             * rewrite retcode to 0 to show it as normal procedure completion

             */

            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)

                ret = 0;

            av_frame_free(&filt_frame);

            break;

        }

        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;

        ret = encode_write_frame(filt_frame, stream_index, NULL);

        if (ret < 0)

            break;

    }

    return ret;

}

static int flush_encoder(unsigned int stream_index)

{

    int ret;

    int got_frame;

    if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &

          AV_CODEC_CAP_DELAY))

        return 0;

    while (1) {

        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);

        ret = encode_write_frame(NULL, stream_index, &got_frame);

        if (ret < 0)

            break;

        if (!got_frame)

            return 0;

    }

    return ret;

}


-(void)transcodingFileWithPath:(NSString *)inputPath saveTo:(NSString *)outputPath onProgress:(onProgress)onprogress onComplete:(void (^)(BOOL))oncomplete

{

    NSLog(@"transcodingFileWithPath");

    int ret;

    AVPacket packet = { .data = NULL, .size = 0 };

    AVFrame *frame = NULL;

    enum AVMediaType type;

    unsigned int stream_index;

    unsigned int i;

    int got_frame;

    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    av_register_all();

    avfilter_register_all();

    if ((ret = open_input_file([inputPath UTF8String])) < 0)

        goto end;

    if ((ret = open_output_file([outputPath UTF8String])) < 0)

        goto end;

    if ((ret = init_filters()) < 0)

        goto end;

    /* read all packets */

    while (1) {

        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)

            break;

        stream_index = packet.stream_index;

        type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;

        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",

               stream_index);

        if (filter_ctx[stream_index].filter_graph) {

            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");

            frame = av_frame_alloc();

            if (!frame) {

                ret = AVERROR(ENOMEM);

                break;

            }

            av_packet_rescale_ts(&packet,

                                 ifmt_ctx->streams[stream_index]->time_base,

                                 stream_ctx[stream_index].dec_ctx->time_base);

            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :

            avcodec_decode_audio4;

            ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,

                           &got_frame, &packet);

            if (ret < 0) {

                av_frame_free(&frame);

                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");

                break;

            }

            if (got_frame) {

                frame->pts = frame->best_effort_timestamp;

                ret = filter_encode_write_frame(frame, stream_index);

                av_frame_free(&frame);

                if (ret < 0)

                    goto end;

            } else {

                av_frame_free(&frame);

            }

        } else {

            /* remux this frame without reencoding */

            av_packet_rescale_ts(&packet,

                                 ifmt_ctx->streams[stream_index]->time_base,

                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);

            if (ret < 0)

                goto end;

        }

        av_packet_unref(&packet);

    }

    /* flush filters and encoders */

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {

        /* flush filter */

        if (!filter_ctx[i].filter_graph)

            continue;

        ret = filter_encode_write_frame(NULL, i);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");

            goto end;

        }

        /* flush encoder */

        ret = flush_encoder(i);

        if (ret < 0) {

            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");

            goto end;

        }

    }

    av_write_trailer(ofmt_ctx);

end:

    av_packet_unref(&packet);

    av_frame_free(&frame);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {

        avcodec_free_context(&stream_ctx[i].dec_ctx);

        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)

            avcodec_free_context(&stream_ctx[i].enc_ctx);

        if (filter_ctx && filter_ctx[i].filter_graph)

            avfilter_graph_free(&filter_ctx[i].filter_graph);

    }

    av_free(filter_ctx);

    av_free(stream_ctx);

    avformat_close_input(&ifmt_ctx);

    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))

        avio_closep(&ofmt_ctx->pb);

    avformat_free_context(ofmt_ctx);



    if (ret < 0)

        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));



    if (self.checkCancellConver == YES) {

        //   [[NSFileManager defaultManager] removeItemAtPath:outputPath error:nil];

        oncomplete(false);

        return;

    }

    if (ret < 0) {

        NSLog(@"Fail to convert ************ ");

        oncomplete(false);

    }else {

        NSLog(@"************* \n inputPath:%@ \n outputPath:%@ \n**************",inputPath,outputPath);

        NSLog(@"Success *********** viswa");

        oncomplete(true);

    }



}


@end

———————————————————————————————————————————————————————————————————————————————————————————————————————


Debug console error:
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
transcodingFileWithPath
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '/Users/viswanath/Library/Developer/CoreSimulator/Devices/A71B16D3-86A9-4403-A6B1-9A8D23B842C0/data/Containers/Bundle/Application/0342E695-051C-4C71-9C54-581196CC2FE8/DLGDemo.app/VID_00222.mpg':
Metadata:
major_brand : isom
minor_version : 512
compatible_brands: isomiso2mp41
encoder : Lavf54.0.100
Duration: 00:00:30.08, start: 0.000000, bitrate: 1360 kb/s
Stream #0:0(und): Video: mpeg4 (Simple Profile) (mp4v / 0x7634706D), yuv420p, 1024x720 [SAR 1:1 DAR 64:45], 1226 kb/s, 25 fps, 25 tbr, 25 tbn, 25 tbc (default)
Metadata:
handler_name : VideoHandler
Stream #0:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 128 kb/s (default)
Metadata:
handler_name : SoundHandler
[h264_videotoolbox @ 0x7f9354822600] Error setting max bitrate property: -12902
Cannot open video encoder for stream #0
Error occurred: Generic error in an external library

-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
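
For reference, here is a small diagnostic sketch of what I could call in place of the plain avcodec_open2() above to see which H.264 encoder is actually picked on iOS and what exactly fails. It assumes the same headers already included in the file; the helper name try_open_h264 is only for illustration, and passing "libx264" works only if the FFmpeg build was configured with --enable-libx264.

/* hypothetical helper, for debugging only */
static int try_open_h264(AVCodecContext *enc_ctx, const char *enc_name)
{
    /* look up either a specific encoder by name or the default H.264 one */
    AVCodec *encoder = enc_name ? avcodec_find_encoder_by_name(enc_name)
                                : avcodec_find_encoder(AV_CODEC_ID_H264);
    int ret;

    if (!encoder) {
        av_log(NULL, AV_LOG_ERROR, "Encoder '%s' not found in this build\n",
               enc_name ? enc_name : "default h264");
        return AVERROR_ENCODER_NOT_FOUND;
    }

    av_log(NULL, AV_LOG_INFO, "Opening encoder %s (%s)\n",
           encoder->name, encoder->long_name);

    ret = avcodec_open2(enc_ctx, encoder, NULL);
    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "avcodec_open2(%s) failed: %s\n",
               encoder->name, av_err2str(ret));
    return ret;
}

Calling try_open_h264(enc_ctx, NULL) should log which encoder the default lookup resolves to (the console output above suggests h264_videotoolbox), and try_open_h264(enc_ctx, "libx264") would show whether a software encoder behaves differently, assuming it is compiled in.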

Thanks and regards,
Viswanath