ffmpeg Tutorial 01

Published: 2018-04-12 22:10:10
// tutorial01.c
// Code based on a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
// With updates from https://github.com/chelyaev/ffmpeg-tutorial
// Updates tested on:
// LAVC 54.59.100, LAVF 54.29.104, LSWS 2.1.101
// on GCC 4.7.2 in Debian February 2015

// A small sample program that shows how to use libavformat and libavcodec to
// read video from a file.
//
// Use
//
// gcc -o tutorial01 tutorial01.c -lavformat -lavcodec -lswscale -lz
//
// to build (assuming libavformat and libavcodec are correctly installed on
// your system).
//
// Run using
//
// tutorial01 myvideofile.mpg
//
// to write the first five frames from "myvideofile.mpg" to disk in PPM
// format.

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

#include <stdio.h>

// compatibility with newer API
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_free avcodec_free_frame
#endif

// A PPM file is simply a file that has RGB information laid out in a long string.
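// The layout is a small text header, "P6\n<width> <height>\n255\n", followed
// by width*height packed 3-byte RGB pixels, written row by row.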
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
    FILE *pFile;
    char szFilename[64];
    int  y;
    
    // Open file
    snprintf(szFilename, sizeof(szFilename), "/Users/zhangyoulun/temp/frame%d.ppm", iFrame);
    pFile=fopen(szFilename, "wb");
    if(pFile==NULL)
        return;
    
    // Write header
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);
    
    // Write pixel data
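    // pFrame->data[0] holds the packed RGB24 pixels; linesize[0] is the
    // stride in bytes, which may exceed width*3 due to alignment padding,
    // so we write one row at a time rather than one big fwrite.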
    for(y=0; y<height; y++)
        fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);
    
    // Close file
    fclose(pFile);
}

int main(int argc, char *argv[]) {
    // Initializing these to NULL prevents segfaults!
    AVFormatContext   *pFormatCtx = NULL;
    int               i, videoStream;
    AVCodecContext    *pCodecCtxOrig = NULL;
    AVCodecContext    *pCodecCtx = NULL;
    AVCodec           *pCodec = NULL;
    AVFrame           *pFrame = NULL;
    AVFrame           *pFrameRGB = NULL;
    AVPacket          packet;
    int               frameFinished;
    int               numBytes;
    uint8_t           *buffer = NULL;
    struct SwsContext *sws_ctx = NULL;
    
    if(argc < 2) {
        printf("Please provide a movie file\n");
        return -1;
    }
    // Register all formats and codecs
    // This registers all available file formats and codecs with the library so they will be used automatically when a file with the corresponding format/codec is opened.
    av_register_all();
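    // (In FFmpeg 4.0 and later, av_register_all() is deprecated and no longer
    // needed; formats and codecs are registered automatically.)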
    
    // Open video file
    // This function reads the file header and stores information about the file format in the AVFormatContext structure we have given it.
    // Note that it only looks at the file header.
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
        return -1; // Couldn't open file
    
    // Retrieve stream information
    // This function populates pFormatCtx->streams with the proper information.
    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information
    
    // Dump information about file onto standard error
    /*
     Prints something like this to stderr:
     Input #0, mpegts, from '/temp/aaa.ts':
     Duration: 00:00:11.13, start: 13.926000, bitrate: 550 kb/s
     Program 1
     Stream #0:0[0x101]: Audio: aac (LC) ([15][0][0][0] / 0x000F), 44100 Hz, stereo, fltp, 89 kb/s
     Stream #0:1[0x100]: Video: h264 (Main) ([27][0][0][0] / 0x001B), yuv420p(progressive), 640x480, 18 fps, 18 tbr, 90k tbn, 180k tbc
     */
    av_dump_format(pFormatCtx, 0, argv[1], 0);
    
    // Find the first video stream
    // Now pFormatCtx->streams is just an array of pointers, of size pFormatCtx->nb_streams, so let's walk through it until we find a video stream.
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
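    // (Newer FFmpeg can also pick the stream for us, roughly:
    //   videoStream = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO,
    //                                     -1, -1, NULL, 0);
    // )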
    if(videoStream==-1)
        return -1; // Didn't find a video stream
    
    // The stream's information about the codec is in what we call the "codec context."
    // This contains all the information about the codec that the stream is using, and now we have a pointer to it.
    // But we still have to find the actual codec and open it.
    // Note that we must not use the AVCodecContext from the video stream directly! So we have to use avcodec_copy_context() to copy the context to a new location (after allocating memory for it, of course).
    
    // Get a pointer to the codec context for the video stream
    pCodecCtxOrig=pFormatCtx->streams[videoStream]->codec;
    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtxOrig->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Copy context
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        return -1; // Error copying codec context
    }
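    // (avcodec_copy_context() and stream->codec are deprecated in newer
    // FFmpeg; the modern pattern fills the context from the stream's
    // AVCodecParameters instead, roughly:
    //   avcodec_parameters_to_context(pCodecCtx,
    //                                 pFormatCtx->streams[videoStream]->codecpar);
    // )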
    
    // Open codec
    // Initialize the AVCodecContext to use the given AVCodec.
    // The prototype is:
    // int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
    if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
        return -1; // Could not open codec
    
    // Next we start saving the decoded data to PPM files.
    
    // Allocate video frame
    // This holds the decoded frame in its native (codec) pixel format.
    pFrame=av_frame_alloc();
    
    // Allocate an AVFrame structure for the converted RGB frame
    pFrameRGB=av_frame_alloc();
    if(pFrameRGB==NULL)
        return -1;
    
    // Determine required buffer size and allocate buffer
    // Even though we've allocated the frame, we still need a place to put the raw data when we convert it. We use avpicture_get_size to get the size we need, and allocate the space manually:
    numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
                                pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    
    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    // Now we use avpicture_fill to associate the frame with our newly allocated buffer. About the AVPicture cast: the AVPicture struct is a subset of the AVFrame struct - the beginning of the AVFrame struct is identical to the AVPicture struct.
    avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);
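    // (In newer FFmpeg, avpicture_get_size()/avpicture_fill() are deprecated
    // in favor of av_image_get_buffer_size() and av_image_fill_arrays() from
    // libavutil/imgutils.h, roughly:
    //   numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24,
    //                                       pCodecCtx->width, pCodecCtx->height, 1);
    //   av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer,
    //                        AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height, 1);
    // )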
    
    // Finally! Now we're ready to read from the stream!
    
    // What we're going to do is read through the entire video stream by reading in the packet, decoding it into our frame, and once our frame is complete, we will convert and save it.
    
    // initialize SWS context for software scaling
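    // The context converts frames from the decoder's pixel format
    // (pCodecCtx->pix_fmt, typically YUV420P) to RGB24 at the same
    // width/height, using bilinear filtering.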
    sws_ctx = sws_getContext(pCodecCtx->width,
                             pCodecCtx->height,
                             pCodecCtx->pix_fmt,
                             pCodecCtx->width,
                             pCodecCtx->height,
                             AV_PIX_FMT_RGB24,
                             SWS_BILINEAR,
                             NULL,
                             NULL,
                             NULL
                             );
    
    // Read frames and save first five frames to disk
    i=0;
    // av_read_frame() reads in a packet and stores it in the AVPacket struct.
    // Note that we've only allocated the packet structure - ffmpeg allocates the internal data for us, which is pointed to by packet.data. This is freed by the av_free_packet() later.
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            // avcodec_decode_video2() converts the packet to a frame for us.
            // However, we might not have all the information we need for a
            // frame after decoding a single packet, so avcodec_decode_video2()
            // sets frameFinished once a complete frame is available.
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
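            // (avcodec_decode_video2() is deprecated in newer FFmpeg; the
            // modern equivalent is the send/receive pair, roughly:
            //   avcodec_send_packet(pCodecCtx, &packet);
            //   while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) { /* use pFrame */ }
            // )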
            
            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                // Finally, we use sws_scale() to convert from the native format (pCodecCtx->pix_fmt) to RGB.
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                
                // Save the frame to disk
                if(++i<=5)
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
                              i);
            }
        }
        
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
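        // (av_free_packet() is deprecated in newer FFmpeg; use
        // av_packet_unref(&packet) instead.)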
    }
    
    // Free the RGB image
    av_free(buffer);
    av_frame_free(&pFrameRGB);
    
    // Free the YUV frame
    av_frame_free(&pFrame);
    
    // Free the software scaler context
    sws_freeContext(sws_ctx);
    
    // Close the codecs
    avcodec_close(pCodecCtx);
    avcodec_close(pCodecCtxOrig);
    
    // Close the video file
    avformat_close_input(&pFormatCtx);
    
    return 0;
}
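The resulting frame1.ppm through frame5.ppm files (written to /Users/zhangyoulun/temp) can be opened with most image viewers, or converted with ffmpeg itself, e.g. "ffmpeg -i frame1.ppm frame1.png".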

References

Original article: http://zhyoulun.com/post/ffmpeg-tutorial-01
Please credit the source when reposting.