[Libav-user] get RGB values from ffmpeg frame

Navin nkipe at tatapowersed.com
Tue Nov 20 12:08:58 CET 2012


Hey, thanks everyone! I'm still struggling with the code Malik sent, because 
avcodec_decode_video is deprecated.
I thought I'd post the entire code here for anyone who could help, or for 
anyone who needs such code in future. It's a corrected version of ffmpeg's 
tutorial02, with the SDL output included.
The problems are:
1. Since I was having trouble with RGB, I thought I'd use SDL to output the 
frames as bitmaps. I can store the bitmap in an area of memory, but it throws 
an exception when I try to extract it from memory again.
2. Also, even when just trying to write the bitmap to the hard disk, I can't 
get any output unless SDL_DisplayYUVOverlay(bmp, &rect); is called first (and 
I don't want to call it, because I don't want the video displayed; I just 
want the bitmap written out).
3. The main problem is still extracting RGB, so I'd be grateful for any help 
there (I've put an untested sketch of what I think the conversion should look 
like after the code below). Failing RGB, then at least with question 1 (how 
to store the BMP in SDL memory and extract it again).

The code:

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#include <iostream>
#include <cstdlib>   // malloc, free, exit
#include <cstring>   // memset, strcpy, strcat
#include <SDL.h>
#include <SDL_thread.h>
#include "SDL_opengl.h"
#include <gl/gl.h>
#include <gl/glu.h>

//#ifdef __MINGW32__
#undef main /* Prevents SDL from overriding main() */
//#endif

#include <stdio.h>



//------------------------
// These functions should not be used except from pointers in a RWops
static int myseekfunc(SDL_RWops *context, int offset, int whence) { SDL_SetError("Can't seek in this kind of RWops"); return(-1); }
static int myreadfunc(SDL_RWops *context, void *ptr, int size, int maxnum) { memset(ptr,0,size*maxnum); return(maxnum); }
static int mywritefunc(SDL_RWops *context, const void *ptr, int size, int num) { return(num); }
static int myclosefunc(SDL_RWops *context)
{
   if(context->type != 0xdeadbeef) { SDL_SetError("Wrong kind of RWops for myclosefunc()"); return(-1); }
   free(context->hidden.unknown.data1);
   SDL_FreeRW(context);
   return(0);
}

// Note that this function is NOT static -- we want it directly callable from other source files
SDL_RWops *MyCustomRWop()
{
   SDL_RWops *c=SDL_AllocRW();
   if(c==NULL) return(NULL);

   c->seek =myseekfunc;
   c->read =myreadfunc;
   c->write=mywritefunc;
   c->close=myclosefunc;
   c->type =0xdeadbeef;
   printf("deadbeef=%d\n",c->type);
   c->hidden.unknown.data1=malloc(100000);
   return(c);
}
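
// (Untested sketch for question 1.) Instead of the hand-rolled callbacks above,
// SDL_RWFromMem() gives a seekable, writable RWops over an ordinary buffer, so
// the BMP can be written into memory and parsed back out of the same block.
// The buffer size and the helper's name are just placeholders of mine.
SDL_Surface *SaveAndReloadBMP(SDL_Surface *src)
{
   static unsigned char membuf[8 * 1024 * 1024];        // generous guess for one frame's BMP
   SDL_RWops *rw = SDL_RWFromMem(membuf, sizeof(membuf));
   if(rw == NULL) return NULL;
   if(SDL_SaveBMP_RW(src, rw, 0) != 0) { SDL_FreeRW(rw); return NULL; }  // write BMP into membuf
   SDL_RWseek(rw, 0, SEEK_SET);                         // rewind before reading it back
   SDL_Surface *copy = SDL_LoadBMP_RW(rw, 0);           // parse the BMP straight from memory
   SDL_FreeRW(rw);
   return copy;                                         // caller frees with SDL_FreeSurface()
}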


int main(int argc, char *argv[])
{
   AVFormatContext *pFormatCtx = NULL;
   int             i, videoStream;
   AVCodecContext  *pCodecCtx = NULL;
   AVCodec         *pCodec = NULL;
   AVFrame         *pFrame = NULL;
   AVPacket        packet;
   int             frameFinished;
   //float           aspect_ratio;

   AVDictionary    *optionsDict = NULL;
   struct SwsContext *sws_ctx = NULL;

   SDL_Overlay     *bmp = NULL;
   SDL_Surface     *screen = NULL;
   SDL_Surface     *screen2 = NULL;
   SDL_Surface     *rgbscreen = NULL;
   SDL_Rect        rect;
   SDL_Event       event;


   if(argc < 2) { fprintf(stderr, "Usage: test.exe <videofile>\n"); exit(1); }
   // Register all formats and codecs
   av_register_all();

   if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) { fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError()); exit(1); }

   // Open video file
   if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)    return -1; // Couldn't open file

   // Retrieve stream information
   if(avformat_find_stream_info(pFormatCtx, NULL)<0)    return -1; // Couldn't find stream information

   // Dump information about file onto standard error
   //av_dump_format(pFormatCtx, 0, argv[1], 0);

   // Find the first video stream
   videoStream=-1;
   for(i=0; i<pFormatCtx->nb_streams; i++)
      if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) { videoStream=i;  break; }
   if(videoStream==-1)    return -1; // Didn't find a video stream

   // Get a pointer to the codec context for the video stream
   pCodecCtx=pFormatCtx->streams[videoStream]->codec;

   // Find the decoder for the video stream
   pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
   if(pCodec==NULL) { fprintf(stderr, "Unsupported codec!\n"); return -1; } // Codec not found

   // Open codec
   if(avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0)    return -1; // Could not open codec

   // Allocate video frame
   pFrame = avcodec_alloc_frame();

#if defined(TRIAL)
     int width1 = pCodecCtx->width, height1 = pCodecCtx->height, width2 = pCodecCtx->width, height2 = pCodecCtx->height;
     struct SwsContext *resize = sws_getContext(width1, height1, PIX_FMT_YUV420P, width2, height2, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
     AVFrame* frame1 = avcodec_alloc_frame(); // this is your original frame
     AVFrame* frame2 = avcodec_alloc_frame();
     int num_bytes = avpicture_get_size(PIX_FMT_RGB24, width2, height2);
     uint8_t* frame2_buffer = (uint8_t *) av_malloc(num_bytes * sizeof(uint8_t));

#endif

   // Make a screen to put our video
#ifndef __DARWIN__
         screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
         screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
   if(!screen) { fprintf(stderr, "SDL: could not set video mode - exiting\n"); exit(1); }

   // Allocate a place to put our YUV image on that screen
   bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);

   sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

   // Read frames; each decoded frame is displayed and saved to disk as a bitmap
   i=0;
   while(av_read_frame(pFormatCtx, &packet) >= 0)
   {
     // Is this a packet from the video stream?
     if(packet.stream_index==videoStream)
     {
     // Decode video frame
     avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

     // Did we get a video frame?
     if(frameFinished)
     {
#if defined(TRIAL)
         avpicture_fill((AVPicture*) frame2, frame2_buffer, PIX_FMT_RGB24, width2, height2);
         printf("data = %p \n", (void*)frame2->data[0]);
         printf("linesize = %d %d %d\n", frame2->linesize[0], frame2->linesize[1], frame2->linesize[2]);
         printf("width = %d\n", pCodecCtx->width);
         printf("height = %d\n", pCodecCtx->height);
         std::cin.get();

         int linesize = frame2->linesize[0];
         for(int xx = 0; xx < (linesize * height1)-1; xx += 3)  // buffer holds linesize * height bytes, not * width
         {
             int r = frame2->data[0][xx];
             int g = frame2->data[0][xx+1];
             int b = frame2->data[0][xx+2];
             printf("xx=%d                 r=%d, g=%d, b=%d \n", xx, r, g, b);
         }
         printf("frame%d done----------------",i++);
         //for(int xx = 0; xx < width1; xx = xx + 3)
         //{
         //    for(int yy = 0; yy < height1; ++yy)
         //    {
         //        //int p = xx*3 + yy*frame2->linesize[0];
         //        //int p = xx * 3 + yy * linesize;
         //        printf("yy=%d xx=%d",yy,xx);
         //        int p = yy * linesize + xx;
         //        printf("p=%d\n",p);
         //        int r = frame2->data[0][p];
         //        int g = frame2->data[0][p+1];
         //        int b = frame2->data[0][p+2];
         //        printf("[r=%d, g=%d, b=%d ]\n", r, g, b);
         //    }//for
         //}//for


         // frame1 should be filled by now (eg using avcodec_decode_video)
         sws_scale(resize, frame1->data, frame1->linesize, 0, height1, frame2->data, frame2->linesize);
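         // NOTE: I suspect the pixel loop above prints garbage because this
         // sws_scale() only runs afterwards, and frame1 is never actually
         // filled -- presumably the conversion should use the decoded pFrame
         // as the source and run before the loop. Is that the right order?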
#endif

         SDL_LockYUVOverlay(bmp);//-----------lock

         AVPicture pict;
         pict.data[0] = bmp->pixels[0];
         pict.data[1] = bmp->pixels[2];
         pict.data[2] = bmp->pixels[1];

         pict.linesize[0] = bmp->pitches[0];
         pict.linesize[1] = bmp->pitches[2];
         pict.linesize[2] = bmp->pitches[1];

         // Convert the image into YUV format that SDL uses
         sws_scale( sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize );

         SDL_UnlockYUVOverlay(bmp);//-----------unlock

         rect.x = 0;
         rect.y = 0;
         rect.w = pCodecCtx->width;
         rect.h = pCodecCtx->height;
         SDL_DisplayYUVOverlay(bmp, &rect);

         //printf("sizeof screen = %d\n",sizeof(*screen));

         if (++i != 0)
         {
             char numberbuffer [33], str[80];
             strcpy (str, "bitmap");
             strcat (str, itoa(i, numberbuffer, 10));
             strcat (str, ".bmp"); // file name for storing this frame's image
             //---------this saves it as a bitmap to filestream. But now how to extract it?
             //SDL_RWops *filestream = MyCustomRWop();//SDL_AllocRW();
             //SDL_SaveBMP_RW (screen, filestream, i);

             //screen2 = SDL_LoadBMP_RW(filestream,1);//LOADING IS THE PROBLEM HERE. DON'T KNOW WHY
             //filestream->close;
             //SDL_SaveBMP(screen2, str);

             SDL_SaveBMP(screen, str); //WORKS: saves frame to a file as a bitmap
         }

       }//if(frameFinished)
     }//if

     // Free the packet that was allocated by av_read_frame
     av_free_packet(&packet);
     SDL_PollEvent(&event);
     switch(event.type)
     {
     case SDL_QUIT:
         SDL_Quit();
         exit(0);
         break;
     default:
         break;
     }//switch
   }//while

   av_free(pFrame);// Free the YUV frame
   avcodec_close(pCodecCtx);// Close the codec
   avformat_close_input(&pFormatCtx);// Close the video file

   return 0;
}//main
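
For question 3, this is the minimal conversion I think should work, going by 
the doc/examples Carl Eugen mentioned. It's an untested sketch -- the names 
(rgb_ctx, rgbFrame, rgb_buffer) are just placeholders of mine -- and it 
assumes pFrame has just been decoded and pCodecCtx is the video stream's 
codec context:

// Build a converter from the decoder's pixel format to packed RGB24
struct SwsContext *rgb_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                            pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
AVFrame *rgbFrame = avcodec_alloc_frame();
int rgb_size = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
uint8_t *rgb_buffer = (uint8_t *) av_malloc(rgb_size);
avpicture_fill((AVPicture *) rgbFrame, rgb_buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

// Convert the freshly decoded pFrame into the RGB frame
sws_scale(rgb_ctx, (uint8_t const * const *) pFrame->data, pFrame->linesize,
          0, pCodecCtx->height, rgbFrame->data, rgbFrame->linesize);

// RGB24 is packed, 3 bytes per pixel; each row starts linesize[0] bytes after the previous one
for(int y = 0; y < pCodecCtx->height; y++)
{
    for(int x = 0; x < pCodecCtx->width; x++)
    {
        uint8_t *px = rgbFrame->data[0] + y * rgbFrame->linesize[0] + x * 3;
        printf("x=%d y=%d  r=%d g=%d b=%d\n", x, y, px[0], px[1], px[2]);
    }
}

If that is right, I suppose question 2 also mostly goes away, because the RGB 
data can be wrapped in an SDL_Surface and written to disk without 
SDL_DisplayYUVOverlay() ever being called (again untested, and the channel 
masks may need swapping on other platforms):

SDL_Surface *surf = SDL_CreateRGBSurfaceFrom(rgbFrame->data[0],
        pCodecCtx->width, pCodecCtx->height, 24, rgbFrame->linesize[0],
        0x0000FF, 0x00FF00, 0xFF0000, 0);  // masks for the R,G,B byte order of PIX_FMT_RGB24 (little-endian)
SDL_SaveBMP(surf, "frame.bmp");
SDL_FreeSurface(surf);     // does not free rgb_buffer; release that separately

av_free(rgb_buffer);
av_free(rgbFrame);
sws_freeContext(rgb_ctx);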

Nav

On 11/20/2012 3:38 PM, Carl Eugen Hoyos wrote:
> Navin<nkipe at ...>  writes:
>
>> I have been through many websites, but they either use
>> img_convert (which doesn't work) or sws_scale, which
>> crashes when I try to use it with RGB.
> I may miss something, but I guess this is the important
> problem you should work on (see doc/examples).
>
> Possibly unrelated:
> In your source code, I did not find information which
> decoder you are using.
> Some decoders (everything mpeg-related) output yuv, and
> if you want the data in rgb, you have to convert it
> (using the software scaler), others (some lossless formats)
> output rgb, that may already be the data you want.
>
> Carl Eugen
>
> _______________________________________________
> Libav-user mailing list
> Libav-user at ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/libav-user
>

