/*
 * hedgewars/avwrapper.c
 * author: Stepan777 <stepik-777@mail.ru>
 * Sat, 07 Jul 2012 16:21:42 +0400
 * changeset 7356 1ae5cf294216 (parent 7306 3cff5c769509, child 7359 8bb4da06b065)
 * fix for older ffmpeg (0.7.13)
 */


#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdarg.h>
#include "libavformat/avformat.h"

#ifndef AVIO_FLAG_WRITE
#define AVIO_FLAG_WRITE AVIO_WRONLY
#endif

static AVFormatContext* g_pContainer;
static AVOutputFormat* g_pFormat;
static AVStream* g_pAStream;
static AVStream* g_pVStream;
static AVFrame* g_pAFrame;
static AVFrame* g_pVFrame;
static AVCodec* g_pACodec;
static AVCodec* g_pVCodec;
static AVCodecContext* g_pAudio;
static AVCodecContext* g_pVideo;

static int g_Width, g_Height;
static uint32_t g_Frequency, g_Channels;
static int g_VQuality, g_AQuality;
static AVRational g_Framerate;
static const char* g_pPreset;

static FILE* g_pSoundFile;
static int16_t* g_pSamples;
static int g_NumSamples;
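
// g_pSoundFile is the raw sound dump written by the engine: a 4-byte sample
// rate and a 4-byte channel count, followed by interleaved signed 16-bit
// samples; g_pSamples/g_NumSamples buffer one encoder frame's worth of them.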

/*
Initially I wrote this code for the latest ffmpeg, but on Linux (Ubuntu) only
an older version is available from the repositories. That is why you see all
of these #if LIBAVCODEC_VERSION_MAJOR < 54 blocks here.
Actually, it may be possible to remove the code for the newer version and use
only the code for the older one.
*/

#if LIBAVCODEC_VERSION_MAJOR < 54
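// with older libavcodec (major version < 54) the encode functions write their
// output packets into this fixed buffer (see WriteAudioFrame and WriteFrame below)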
#define OUTBUFFER_SIZE 200000
static uint8_t g_OutBuffer[OUTBUFFER_SIZE];
#endif

// pointer to function from hwengine (uUtils.pas)
static void (*AddFileLogRaw)(const char* pString);

static void FatalError(const char* pFmt, ...)
{
    char Buffer[1024];
    va_list VaArgs;

    va_start(VaArgs, pFmt);
    vsnprintf(Buffer, 1024, pFmt, VaArgs);
    va_end(VaArgs);

    AddFileLogRaw("Error in av-wrapper: ");
    AddFileLogRaw(Buffer);
    AddFileLogRaw("\n");
    exit(1);
}

// Function to be called from libav for logging.
// Note: libav can call LogCallback from different threads
// (AddFileLogRaw takes care of locking with a mutex).
static void LogCallback(void* p, int Level, const char* pFmt, va_list VaArgs)
{
    char Buffer[1024];

    vsnprintf(Buffer, 1024, pFmt, VaArgs);
    AddFileLogRaw(Buffer);
}

static void Log(const char* pFmt, ...)
{
    char Buffer[1024];
    va_list VaArgs;

    va_start(VaArgs, pFmt);
    vsnprintf(Buffer, 1024, pFmt, VaArgs);
    va_end(VaArgs);

    AddFileLogRaw(Buffer);
}

static void AddAudioStream()
{
#if LIBAVFORMAT_VERSION_MAJOR >= 54
    g_pAStream = avformat_new_stream(g_pContainer, g_pACodec);
#else
    g_pAStream = av_new_stream(g_pContainer, 1);
#endif
    if(!g_pAStream)
    {
        Log("Could not allocate audio stream\n");
        return;
    }
    g_pAStream->id = 1;

    g_pAudio = g_pAStream->codec;

    avcodec_get_context_defaults3(g_pAudio, g_pACodec);
    g_pAudio->codec_id = g_pACodec->id;

    // put parameters
    g_pAudio->sample_fmt = AV_SAMPLE_FMT_S16;
    g_pAudio->sample_rate = g_Frequency;
    g_pAudio->channels = g_Channels;

    // set quality: values above 100 are treated as a bit rate (bits/s), lower values as a constant quality factor
    if (g_AQuality > 100)
        g_pAudio->bit_rate = g_AQuality;
    else
    {
        g_pAudio->flags |= CODEC_FLAG_QSCALE;
        g_pAudio->global_quality = g_AQuality*FF_QP2LAMBDA;
    }

    // some formats want stream headers to be separate
    if (g_pFormat->flags & AVFMT_GLOBALHEADER)
        g_pAudio->flags |= CODEC_FLAG_GLOBAL_HEADER;

    // open it
    if (avcodec_open2(g_pAudio, g_pACodec, NULL) < 0)
    {
        Log("Could not open audio codec %s\n", g_pACodec->long_name);
        return;
    }

    // decide how many samples to feed the encoder per packet: use the codec's
    // fixed frame size, or an arbitrary 4096 samples if it accepts any size
#if LIBAVCODEC_VERSION_MAJOR >= 54
    if (g_pACodec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
#else
    if (g_pAudio->frame_size == 0)
#endif
        g_NumSamples = 4096;
    else
        g_NumSamples = g_pAudio->frame_size;
    g_pSamples = (int16_t*)av_malloc(g_NumSamples*g_Channels*sizeof(int16_t));
    g_pAFrame = avcodec_alloc_frame();
    if (!g_pAFrame)
    {
        Log("Could not allocate frame\n");
        return;
    }
}

// returns non-zero if there is more sound
static int WriteAudioFrame()
{
    if (!g_pAStream)
        return 0;

    AVPacket Packet = { 0 };
    av_init_packet(&Packet);

    int NumSamples = fread(g_pSamples, 2*g_Channels, g_NumSamples, g_pSoundFile);
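    // NumSamples now holds the number of complete multi-channel samples read
    // (fread items of 2*g_Channels bytes each)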

#if LIBAVCODEC_VERSION_MAJOR >= 54
    AVFrame* pFrame = NULL;
    if (NumSamples > 0)
    {
        g_pAFrame->nb_samples = NumSamples;
        avcodec_fill_audio_frame(g_pAFrame, g_Channels, AV_SAMPLE_FMT_S16,
                                 (uint8_t*)g_pSamples, NumSamples*2*g_Channels, 1);
        pFrame = g_pAFrame;
    }
    // when NumSamples == 0 we still need to call encode_audio2 to flush
    int got_packet;
    if (avcodec_encode_audio2(g_pAudio, &Packet, pFrame, &got_packet) != 0)
        FatalError("avcodec_encode_audio2 failed");
    if (!got_packet)
        return 0;
#else
    if (NumSamples == 0)
        return 0;
    int BufferSize = OUTBUFFER_SIZE;
    if (g_pAudio->frame_size == 0)
        BufferSize = NumSamples*g_Channels*2;
    Packet.size = avcodec_encode_audio(g_pAudio, g_OutBuffer, BufferSize, g_pSamples);
    if (Packet.size == 0)
        return 1;
    if (g_pAudio->coded_frame && g_pAudio->coded_frame->pts != AV_NOPTS_VALUE)
        Packet.pts = av_rescale_q(g_pAudio->coded_frame->pts, g_pAudio->time_base, g_pAStream->time_base);
    Packet.flags |= AV_PKT_FLAG_KEY;
    Packet.data = g_OutBuffer;
#endif

    // Write the compressed frame to the media file.
    Packet.stream_index = g_pAStream->index;
    if (av_interleaved_write_frame(g_pContainer, &Packet) != 0) 
        FatalError("Error while writing audio frame");
    return 1;
}

// add a video output stream
static void AddVideoStream()
{
#if LIBAVFORMAT_VERSION_MAJOR >= 54
    g_pVStream = avformat_new_stream(g_pContainer, g_pVCodec);
#else
    g_pVStream = av_new_stream(g_pContainer, 0);
#endif
    if (!g_pVStream)
        FatalError("Could not allocate video stream");

    g_pVideo = g_pVStream->codec;

    avcodec_get_context_defaults3(g_pVideo, g_pVCodec);
    g_pVideo->codec_id = g_pVCodec->id;

    // put parameters
    // resolution must be a multiple of two, so round both dimensions down to even values
    g_pVideo->width  = g_Width  & ~1;
    g_pVideo->height = g_Height & ~1;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    g_pVideo->time_base.den = g_Framerate.num;
    g_pVideo->time_base.num = g_Framerate.den;
    //g_pVideo->gop_size = 12; /* emit one intra frame every twelve frames at most */
    g_pVideo->pix_fmt = PIX_FMT_YUV420P;

    // set quality: values above 100 are treated as a bit rate (bits/s), lower values as a constant quality factor
    if (g_VQuality > 100)
        g_pVideo->bit_rate = g_VQuality;
    else
    {
        g_pVideo->flags |= CODEC_FLAG_QSCALE;
        g_pVideo->global_quality = g_VQuality*FF_QP2LAMBDA;
    }

    AVDictionary* pDict = NULL;
    if (strcmp(g_pVCodec->name, "libx264") == 0)
        av_dict_set(&pDict, "preset", g_pPreset, 0);

    // some formats want stream headers to be separate
    if (g_pFormat->flags & AVFMT_GLOBALHEADER)
        g_pVideo->flags |= CODEC_FLAG_GLOBAL_HEADER;

    // open the codec
    if (avcodec_open2(g_pVideo, g_pVCodec, &pDict) < 0)
        FatalError("Could not open video codec %s", g_pVCodec->long_name);

    g_pVFrame = avcodec_alloc_frame();
    if (!g_pVFrame)
        FatalError("Could not allocate frame");

    // only the plane strides are set here (YUV420P: full-width luma, half-width
    // chroma); the actual Y/Cb/Cr plane pointers are filled in for every frame
    // by AVWrapper_WriteFrame()
    g_pVFrame->linesize[0] = g_Width;
    g_pVFrame->linesize[1] = g_Width/2;
    g_pVFrame->linesize[2] = g_Width/2;
    g_pVFrame->linesize[3] = 0;
}

static int WriteFrame(AVFrame* pFrame)
{
    double AudioTime, VideoTime;

    // write buffered audio until the audio stream catches up with the current video time
    if (g_pAStream)
    {
        VideoTime = (double)g_pVStream->pts.val*g_pVStream->time_base.num/g_pVStream->time_base.den;
        do
            AudioTime = (double)g_pAStream->pts.val*g_pAStream->time_base.num/g_pAStream->time_base.den;
        while (AudioTime < VideoTime && WriteAudioFrame());
    }
    
    if (!g_pVStream)
        return 0;

    AVPacket Packet;
    av_init_packet(&Packet);
    Packet.data = NULL;
    Packet.size = 0;

    g_pVFrame->pts++;
    if (g_pFormat->flags & AVFMT_RAWPICTURE)
    {
        /* raw video case. The API will change slightly in the near
           future for that. */
        Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.stream_index = g_pVStream->index;
        Packet.data = (uint8_t*)pFrame;
        Packet.size = sizeof(AVPicture);

        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            FatalError("Error while writing video frame");
        return 0;
    }
    else
    {
#if LIBAVCODEC_VERSION_MAJOR >= 54
        int got_packet;
        if (avcodec_encode_video2(g_pVideo, &Packet, pFrame, &got_packet) < 0)
            FatalError("avcodec_encode_video2 failed");
        if (!got_packet)
            return 0;

        if (Packet.pts != AV_NOPTS_VALUE)
            Packet.pts = av_rescale_q(Packet.pts, g_pVideo->time_base, g_pVStream->time_base);
        if (Packet.dts != AV_NOPTS_VALUE)
            Packet.dts = av_rescale_q(Packet.dts, g_pVideo->time_base, g_pVStream->time_base);
#else 
        Packet.size = avcodec_encode_video(g_pVideo, g_OutBuffer, OUTBUFFER_SIZE, pFrame);
        if (Packet.size < 0)
            FatalError("avcodec_encode_video failed");
        if (Packet.size == 0)
            return 0;

        if( g_pVideo->coded_frame->pts != AV_NOPTS_VALUE)
            Packet.pts = av_rescale_q(g_pVideo->coded_frame->pts, g_pVideo->time_base, g_pVStream->time_base);
        if( g_pVideo->coded_frame->key_frame )
            Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.data = g_OutBuffer;
#endif
        // write the compressed frame in the media file
        Packet.stream_index = g_pVStream->index;
        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            FatalError("Error while writing video frame");
            
        return 1;
    }
}

void AVWrapper_WriteFrame(uint8_t* pY, uint8_t* pCb, uint8_t* pCr)
{
    g_pVFrame->data[0] = pY;
    g_pVFrame->data[1] = pCb;
    g_pVFrame->data[2] = pCr;
    WriteFrame(g_pVFrame);
}
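
// Expected call sequence from the engine: AVWrapper_Init() once, then
// AVWrapper_WriteFrame() for every captured video frame, and finally
// AVWrapper_Close() to flush the encoders and finish the file.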

void AVWrapper_Init(
         void (*pAddFileLogRaw)(const char*),
         const char* pFilename,
         const char* pDesc,
         const char* pSoundFile,
         const char* pFormatName,
         const char* pVCodecName,
         const char* pACodecName,
         const char* pVPreset,
         int Width, int Height,
         int FramerateNum, int FramerateDen,
         int VQuality, int AQuality)
{    
    AddFileLogRaw = pAddFileLogRaw;
    av_log_set_callback( &LogCallback );

    g_Width  = Width;
    g_Height = Height;
    g_Framerate.num = FramerateNum;
    g_Framerate.den = FramerateDen;
    g_VQuality = VQuality;
    g_AQuality = AQuality;
    g_pPreset = pVPreset;

    // initialize libav and register all codecs and formats
    av_register_all();

    // find format
    g_pFormat = av_guess_format(pFormatName, NULL, NULL);
    if (!g_pFormat)
        FatalError("Format \"%s\" was not found", pFormatName);

    // allocate the output media context
    g_pContainer = avformat_alloc_context();
    if (!g_pContainer)
        FatalError("Could not allocate output context");

    g_pContainer->oformat = g_pFormat;

    // store description of file
    av_dict_set(&g_pContainer->metadata, "comment", pDesc, 0);

    // append extension to filename
    char ext[16];
    strncpy(ext, g_pFormat->extensions, 16);
    ext[15] = 0;
    ext[strcspn(ext,",")] = 0;
    snprintf(g_pContainer->filename, sizeof(g_pContainer->filename), "%s.%s", pFilename, ext);

    // find codecs
    g_pVCodec = avcodec_find_encoder_by_name(pVCodecName);
    g_pACodec = avcodec_find_encoder_by_name(pACodecName);

    // add audio and video stream to container
    g_pVStream = NULL;
    g_pAStream = NULL;

    if (g_pVCodec)
        AddVideoStream();
    else
        Log("Video codec \"%s\" was not found; video will be ignored.\n", pVCodecName);

    if (g_pACodec)
    {
        g_pSoundFile = fopen(pSoundFile, "rb");
        if (g_pSoundFile)
        {
            fread(&g_Frequency, 4, 1, g_pSoundFile);
            fread(&g_Channels, 4, 1, g_pSoundFile);
            AddAudioStream();
        }
        else
            Log("Could not open %s\n", pSoundFile);
    }
    else
        Log("Audio codec \"%s\" was not found; audio will be ignored.\n", pACodecName);

    if (!g_pAStream && !g_pVStream)
        FatalError("No video, no audio, aborting...");

    // write format info to log
    av_dump_format(g_pContainer, 0, g_pContainer->filename, 1);

    // open the output file, if needed
    if (!(g_pFormat->flags & AVFMT_NOFILE))
    {
        if (avio_open(&g_pContainer->pb, g_pContainer->filename, AVIO_FLAG_WRITE) < 0)
            FatalError("Could not open output file (%s)", g_pContainer->filename);
    }

    // write the stream header, if any
    avformat_write_header(g_pContainer, NULL);

    if (g_pVStream) // video may have been skipped if the encoder was not found
        g_pVFrame->pts = -1;
}

void AVWrapper_Close()
{
    // output buffered frames (only if a video stream was set up)
    if (g_pVStream && (g_pVCodec->capabilities & CODEC_CAP_DELAY))
        while( WriteFrame(NULL) );
    // output any remaining audio
    while( WriteAudioFrame() );

    // write the trailer, if any.
    av_write_trailer(g_pContainer);

    // close the output file
    if (!(g_pFormat->flags & AVFMT_NOFILE))
        avio_close(g_pContainer->pb);

    // free everything
    if (g_pVStream)
    {
        avcodec_close(g_pVideo);
        av_free(g_pVideo);
        av_free(g_pVStream);
        av_free(g_pVFrame);
    }
    if (g_pAStream)
    {
        avcodec_close(g_pAudio);
        av_free(g_pAudio);
        av_free(g_pAStream);
        av_free(g_pAFrame);
        av_free(g_pSamples);
        fclose(g_pSoundFile);
    }

    av_free(g_pContainer);
}