[Libav-user] [segmentation fault]video encode question

huyanpingz huyanpingz at 126.com
Tue Mar 7 03:30:48 EET 2017


Dear all,
I am trying to decode an FLV file into frames, re-encode them after some modifications, and then send them to an RTMP server, but I get a segmentation fault when I call `av_write_frame`. I have searched Google and other websites, but nothing has helped.


compile command:gcc src/encode.c -o encode.out -g -v -I /usr/local/ffmpeg/include -L /usr/local/ffmpeg/lib -lavformat -lavcodec -lavutil
run command:encode.out /tmp/out.flv rtmp://domain/live/livestream
code is here:
#include <inttypes.h>
#include <math.h>
#include <stdio.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/log.h>
#include <libavutil/mathematics.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>
#include <libavutil/time.h>
#include <libavutil/timestamp.h>
#include <libswscale/swscale.h>


#define INBUF_SIZE 4096
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096

// compatibility with newer API
#if LIBAVCODEC_VERSION_INT  AV_VERSION_INT(55, 28, 1)
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_free avcodec_free_frame
#endif

#define STREAM_DURATION 20
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */ //AV_PIX_FMT_NV12;
#define VIDEO_CODEC_ID CODEC_ID_H264

/* video output */
/* Reusable frame allocated in open_video(); its data/linesize pointers alias
 * dst_picture's buffers (see the AVPicture copy at the end of open_video). */
static AVFrame *frame;

/* dst_picture holds the raw picture buffers for the encoder (allocated in
 * open_video). NOTE(review): src_picture is never used in this file. */
static AVPicture src_picture, dst_picture;

/* Output side. NOTE(review): `encoder` and `outputCodecCtx` are never
 * assigned anywhere in this file — confirm they are actually needed. */
AVCodec *encoder;
AVCodecContext *outputCodecCtx = NULL;
AVFormatContext *outputFormatCtx;

/* Input side; all three are filled in by open_input_file(). */
AVFormatContext *inputFormatCtx = NULL;
AVCodecContext *inputCodecCtx = NULL;
int video_index = -1;


/*
 * Open the encoder attached to `st` and allocate the global reusable
 * frame/picture buffers used by write_video_frame().
 *
 * Returns 0 on success, a negative value on failure.
 */
static int open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    (void)oc; /* kept for signature compatibility; not needed here */

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        /* was missing the %s its avcodec_get_name() argument implied */
        av_log(NULL, AV_LOG_ERROR, "Could not open video codec '%s'.\n",
               avcodec_get_name(c->codec_id));
        return ret;
    }

    /* allocate and init a re-usable frame */
    frame = av_frame_alloc();
    if (!frame) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
        return -1;
    }

    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate picture.\n");
        return ret;
    }

    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;

    return 0;
}


/*
 * Paint a moving synthetic YUV420P test pattern into `pict`.
 * `frame_index` animates the pattern from frame to frame; the byte
 * arithmetic wraps modulo 256, which is intentional.
 */
static void fill_yuv_image(AVPicture *pict, int frame_index, int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y plane: full resolution. */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr planes: half resolution (4:2:0 chroma subsampling). */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

/*
 * Encode one video frame and write it to the muxer, pacing output so that
 * the packet is sent no earlier than its presentation time (real-time
 * streaming). Returns the encoder/muxer result (0 or negative AVERROR).
 */
static int write_video_frame(AVFormatContext *oc, AVStream *st, int frameCount,
                             AVFrame *frame, int64_t start_time)
{
    int ret = 0;
    AVCodecContext *c = st->codec;
    /* %d with an int64_t is undefined behavior; use PRId64 */
    printf("start_time %" PRId64 "\n", start_time);

    /* Overwrite the picture buffers with a synthetic pattern.
     * NOTE(review): this discards the decoded frame's content (frame aliases
     * dst_picture) — remove this call to forward the decoded video. */
    fill_yuv_image(&dst_picture, frameCount, c->width, c->height);

    AVPacket pkt = { 0 };
    int got_packet;
    av_init_packet(&pkt);

    /* encode the image */
    frame->pts = frameCount;
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error encoding video frame.\n");
        return ret;
    }

    if (got_packet) {
        pkt.stream_index = st->index;
        /* Rescale from the input stream's time base to the muxer's.
         * NOTE(review): pkt.pts comes from frame->pts = frameCount, which is
         * a frame counter, not an input-time_base timestamp — confirm the
         * intended time base here. */
        pkt.pts = av_rescale_q_rnd(pkt.pts,
                                   inputFormatCtx->streams[video_index]->time_base,
                                   outputFormatCtx->streams[st->index]->time_base,
                                   AV_ROUND_NEAR_INF);
        ret = av_write_frame(oc, &pkt);

        /* Pace against the wall clock: sleep until the packet's pts. */
        AVRational time_base = inputFormatCtx->streams[video_index]->time_base;
        AVRational time_base_q = { 1, AV_TIME_BASE };
        int64_t pts_time = av_rescale_q(pkt.pts, time_base, time_base_q);
        int64_t now_time = av_gettime() - start_time;
        int64_t sleep_time = pts_time - now_time;
        printf("pts time %" PRId64 ", now_time %" PRId64 ", start_time %" PRId64 "\n",
               pts_time, now_time, start_time);
        if (pts_time > now_time) {
            printf("sleep %" PRId64 "\n", sleep_time);
            av_usleep(pts_time - now_time);
        }

        if (ret < 0)
            av_log(NULL, AV_LOG_ERROR, "Error while writing video frame.\n");

        av_free_packet(&pkt); /* was leaked on every iteration */
    }

    return ret;
}

/*
 * Allocate the global FLV output context for `filename`.
 * Returns 0 on success, -1 on failure.
 *
 * NOTE(review): this does NOT open the AVIOContext (outputFormatCtx->pb);
 * the caller must avio_open() it before avformat_write_header(), or the
 * muxer will crash on a NULL pb.
 */
int open_output_file(const char *filename) {
    avformat_alloc_output_context2(&outputFormatCtx, NULL, "flv", filename);

    if (!outputFormatCtx) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", filename);
        return -1;
    }

    if (!outputFormatCtx->oformat) {
        av_log(NULL, AV_LOG_FATAL, "Could not create the output format for '%s'.\n", filename);
        return -1;
    }

    return 0; /* was missing: falling off a non-void function is UB */
}

/*
 * Create a new output stream in `oc` for `codec_id`, copying the basic
 * encoding parameters (bit rate, dimensions, time base, GOP size, pixel
 * format) from the global input video stream.
 *
 * On success returns the new stream and stores the found encoder in
 * `*codec`; returns NULL on failure.
 */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
    AVCodecContext *c, *input_c;
    AVStream *st = NULL; /* was uninitialized and returned on error paths (UB) */
    AVStream *input_stream;

    input_stream = inputFormatCtx->streams[video_index];
    input_c = input_stream->codec;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        av_log(NULL, AV_LOG_ERROR, "Could not find encoder for '%s'.\n",
               avcodec_get_name(codec_id));
        return NULL;
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate stream.\n");
        return NULL;
    }

    st->id = oc->nb_streams - 1;
    /* Mirror the input stream's time base on the output stream. */
    st->time_base.den = st->pts.den = input_stream->time_base.den;
    st->time_base.num = st->pts.num = input_stream->time_base.num;

    c = st->codec;
    c->codec_id = codec_id;
    c->bit_rate = input_c->bit_rate;
    c->width = input_c->width;
    c->height = input_c->height;
    c->time_base.den = input_c->time_base.den;
    c->time_base.num = input_c->time_base.num;
    c->gop_size = input_c->gop_size; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = input_c->pix_fmt;

    return st;
}

int open_output_file2(const char *filename) {
 AVStream *out_stream;
 AVStream *in_stream;
 AVCodecContext *dec_ctx, *enc_ctx;
 AVCodec *encoder;
 int ret;
 unsigned int i;

 outputFormatCtx = NULL;
 avformat_alloc_output_context2(outputFormatCtx, NULL, "flv", filename);
 if (!outputFormatCtx) {
 av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
 return AVERROR_UNKNOWN;
 }


 for (i = 0; i  inputFormatCtx-nb_streams; i++) {
 out_stream = avformat_new_stream(outputFormatCtx, NULL);
 if (!out_stream) {
 av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
 return AVERROR_UNKNOWN;
 }

 in_stream = inputFormatCtx-streams[i];
 dec_ctx = in_stream-codec;
 enc_ctx = out_stream-codec;

 if (dec_ctx-codec_type == AVMEDIA_TYPE_VIDEO
 || dec_ctx-codec_type == AVMEDIA_TYPE_AUDIO) {
 /* in this example, we choose transcoding to same codec */
 encoder = avcodec_find_encoder(dec_ctx-codec_id);
 if (!encoder) {
 av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
 return AVERROR_INVALIDDATA;
 }

 /* In this example, we transcode to same properties (picture size,
 * sample rate etc.). These properties can be changed for output
 * streams easily using filters */
 if (dec_ctx-codec_type == AVMEDIA_TYPE_VIDEO) {
 enc_ctx-height = dec_ctx-height;
 enc_ctx-width = dec_ctx-width;
 enc_ctx-sample_aspect_ratio = dec_ctx-sample_aspect_ratio;
 /* take first format from list of supported formats */
 if (encoder-pix_fmts)
 enc_ctx-pix_fmt = encoder-pix_fmts[0];
 else
 enc_ctx-pix_fmt = dec_ctx-pix_fmt;
 /* video time_base can be set to whatever is handy and supported by encoder */
 enc_ctx-time_base = dec_ctx-time_base;
 } else {
 enc_ctx-sample_rate = dec_ctx-sample_rate;
 enc_ctx-channel_layout = dec_ctx-channel_layout;
 enc_ctx-channels = av_get_channel_layout_nb_channels(enc_ctx-channel_layout);
 /* take first format from list of supported formats */
 enc_ctx-sample_fmt = encoder-sample_fmts[0];
 enc_ctx-time_base = (AVRational){1, enc_ctx-sample_rate};
 }

 /* Third parameter can be used to pass settings to encoder */
 ret = avcodec_open2(enc_ctx, encoder, NULL);
 if (ret  0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
 return ret;
 }
 } else if (dec_ctx-codec_type == AVMEDIA_TYPE_UNKNOWN) {
 av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
 return AVERROR_INVALIDDATA;
 } else {
 /* if this stream must be remuxed */
 ret = avcodec_copy_context(outputFormatCtx-streams[i]-codec,
 inputFormatCtx-streams[i]-codec);
 if (ret  0) {
 av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
 return ret;
 }
 }

 if (outputFormatCtx-oformat-flags  AVFMT_GLOBALHEADER)
 enc_ctx-flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

 }
 av_dump_format(outputFormatCtx, 0, filename, 1);

 if (!(outputFormatCtx-oformat-flags  AVFMT_NOFILE)) {
 ret = avio_open(outputFormatCtx-pb, filename, AVIO_FLAG_WRITE);
 if (ret  0) {
 av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
 return ret;
 }
 }

 /* init muxer, write output file header */
 ret = avformat_write_header(outputFormatCtx, NULL);
 if (ret  0) {
 av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
 return ret;
 }

 return 0;
}

/*
 * Encode one frame with the first output stream's encoder.
 *
 * Returns 0 when `packet` holds encoded data, 1 when the encoder buffered
 * the frame (no packet produced yet), -1 on encoder error.
 */
int encode(AVFrame *pFrame, AVPacket *packet) {
    AVCodecContext *context = outputFormatCtx->streams[0]->codec;
    int got_packet, ret;

    av_init_packet(packet);
    ret = avcodec_encode_video2(context, packet, pFrame, &got_packet);

    if (ret < 0) {
        av_log(context, AV_LOG_ERROR, "encode failed\n");
        return -1;
    }

    return got_packet ? 0 : 1;
}

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
 AVRational *time_base = fmt_ctx-streams[pkt-stream_index]-time_base;

 printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
 av_ts2str(pkt-pts), av_ts2timestr(pkt-pts, time_base),
 av_ts2str(pkt-dts), av_ts2timestr(pkt-dts, time_base),
 av_ts2str(pkt-duration), av_ts2timestr(pkt-duration, time_base),
 pkt-stream_index);
}

static int open_input_file(char *filename)
{
 AVCodecContext *inputCodecCtxOrig = NULL;
 AVCodec *pCodec = NULL;
 int ret, i;

 if((ret = avformat_open_input(inputFormatCtx, filename, NULL, NULL)) != 0) {
 av_log(inputFormatCtx, AV_LOG_ERROR, "could not open input file\n");
 return ret;
 }

 if((ret = avformat_find_stream_info(inputFormatCtx, NULL))  0) {
 av_log(inputFormatCtx, AV_LOG_ERROR, "could not find stream info");
 return ret;
 }

 // Dump information about file onto standard error
 av_dump_format(inputFormatCtx, 0, filename, 0);

 // Find the first video stream
 for (i = 0; i  inputFormatCtx-nb_streams; i++)
 if (inputFormatCtx-streams[i]-codec-codec_type == AVMEDIA_TYPE_VIDEO) {
 video_index = i;
 break;
 }
 if (video_index == -1)
 return -1; // Didn't find a video stream


 // Get a pointer to the codec context for the video stream
 inputCodecCtxOrig = inputFormatCtx-streams[video_index]-codec;
 // Find the decoder for the video stream
 pCodec = avcodec_find_decoder(inputCodecCtxOrig-codec_id);
 if (pCodec == NULL) {
 av_log(inputFormatCtx, AV_LOG_ERROR, "Unsupported codec!\n");
 return -1; // Codec not found
 }
 // Copy context
 inputCodecCtx = avcodec_alloc_context3(pCodec);
 if (avcodec_copy_context(inputCodecCtx, inputCodecCtxOrig) != 0) {
 av_log(inputCodecCtx, AV_LOG_ERROR, "Couldn't copy codec context");
 return -1; // Error copying codec context
 }

 // Open codec
 if (avcodec_open2(inputCodecCtx, pCodec, NULL)  0)
 return -1; // Could not open codec

 return 0;
}

/*
 * Read packets from the input until one complete video frame is decoded
 * into `pFrame`. Returns 0 when a frame is available, -1 on decode error
 * or end of input.
 */
static int decode(AVFrame *pFrame)
{
    AVPacket packet;
    int ret, frameFinished;

    while (av_read_frame(inputFormatCtx, &packet) >= 0) {
        log_packet(inputFormatCtx, &packet);
        /* Is this a packet from the video stream? */
        if (packet.stream_index == video_index) {
            /* Decode video frame */
            ret = avcodec_decode_video2(inputCodecCtx, pFrame, &frameFinished, &packet);
            if (ret < 0) {
                av_log(inputCodecCtx, AV_LOG_ERROR, "could not decode package\n");
                av_free_packet(&packet); /* was leaked on this path */
                return -1;
            }
            /* Did we get a video frame? */
            if (frameFinished) {
                av_free_packet(&packet); /* was leaked on this path */
                return 0;
            }
        }

        /* Free the packet that was allocated by av_read_frame */
        av_free_packet(&packet);
    }

    return -1; /* EOF or read error */
}


int main(int argc, char* argv[]) {
 av_register_all();
 avformat_network_init();
 av_log_set_level(AV_LOG_DEBUG);
 AVStream *video_st;
 AVFrame *pFrame;
 AVCodec *video_codec;
 int ret;

 open_input_file(argv[1]);
 open_output_file(argv[2]);
 video_st = add_stream(outputFormatCtx, video_codec, AV_CODEC_ID_H264);

 /* Now that all the parameters are set, we can open the video codec and allocate the necessary encode buffers. */
 if (video_st) {
 av_log(NULL, AV_LOG_DEBUG, "Video stream codec %s.\n ", avcodec_get_name(video_st-codec-codec_id));

 ret = open_video(outputFormatCtx, video_codec, video_st);
 if (ret  0) {
 av_log(NULL, AV_LOG_FATAL, "Open video stream failed.\n");
 return -1;
 }
 }
 else {
 av_log(NULL, AV_LOG_FATAL, "Add video stream for the codec '%s' failed.\n", avcodec_get_name(AV_CODEC_ID_H264));
 return -1;
 }

 av_dump_format(outputFormatCtx, 0, argv[2], 1);
 ret = avformat_write_header(outputFormatCtx, NULL);
 if (ret != 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to connect to RTSP server for '%s'.\n", argv[2]);
 return -1;
 }

 AVFormatContext *out = outputFormatCtx;

 // Allocate video frame
 pFrame = av_frame_alloc();


 int frameCount = 0;
 int64_t start_time = av_gettime();
 printf("start_time %d \n", start_time);
 while(decode(pFrame) == 0) {
 frameCount++;
 // do something with frame
 AVPacket packet = {0};
 write_video_frame(outputFormatCtx, video_st, frameCount, pFrame, start_time);

// ret = encode(pFrame, packet);
// printf("ret:%d\n", ret);
// if(ret != 0) continue;
// ret = av_write_frame(out, packet);
//
// printf("out format:\n");
 //log_packet(outputFormatCtx, packet);
// if(ret  0) {
// av_log(outputFormatCtx, AV_LOG_ERROR, "write frame failed\n");
// }
// ret = flush_encoder(outputFormatCtx, 0);
// if (ret  0) {
// printf("Flushing encoder failed\n");
// return -1;
// }
 }
}
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://ffmpeg.org/pipermail/libav-user/attachments/20170307/cdfc52d3/attachment.html>


More information about the Libav-user mailing list