树莓派之SDL2.0_OpenGLES_FFmpeg硬解视频播放

// 如何编译 FFmpeg h264_mmal 请看本博客对应博文
// 开启声音需要提前安装 
apt-get install libasound2-dev
// 首先下载好 SDL 2.0 源码等待编译
http://www.libsdl.org/download-2.0.php

// 编译一版 OpenGL ES 的 SDL ...
./configure --prefix=/usr/local \
   --disable-video-x11 \
   --disable-pulseaudio \
   --disable-esd \
   --disable-video-opengl

Enabled modules : atomic audio video render events joystick 
            haptic sensor power filesystem threads 
            timers file loadso cpuinfo assembly
Assembly Math   :
Audio drivers   : disk dummy oss alsa(dynamic)
Video drivers   : dummy rpi opengl_es1 opengl_es2 vulkan
Input drivers   : linuxev linuxkd
Using libsamplerate : NO
Using libudev       : NO
Using dbus          : NO
Using ime           : YES
Using ibus          : NO
Using fcitx         : NO


make
make install
// Windows 系统
// Qt FFmpeg 与 SDL 导入到项目 ...
INCLUDEPATH += D:/FFmepg/dev/include
INCLUDEPATH += D:/FFmepg/SDL2/i686-w64-mingw32/include
LIBS += -lws2_32 -lopengl32 -lole32 -ldxguid -lglut32 -lglu32
LIBS += D:/FFmepg/dev/lib/libavcodec.dll.a \
  D:/FFmepg/dev/lib/libavdevice.dll.a \
  D:/FFmepg/dev/lib/libavfilter.dll.a \
  D:/FFmepg/dev/lib/libavformat.dll.a \
  D:/FFmepg/dev/lib/libavutil.dll.a \
  D:/FFmepg/dev/lib/libswresample.dll.a \
  D:/FFmepg/dev/lib/libswscale.dll.a \
  D:/FFmepg/dev/lib/libpostproc.dll.a
LIBS += D:/FFmepg/SDL2/i686-w64-mingw32/lib/libSDL2.dll.a \
D:/FFmepg/SDL2/i686-w64-mingw32/lib/libSDL2main.a

// 树莓派 ...
INCLUDEPATH += /usr/local/include
LIBS += -lpthread -lm -ldl -lfreetype -lz -lrt
LIBS += -L/usr/local/lib -lavformat -lavcodec -lavutil -lswscale -lx264 -lSDL2 -lSDL2_image
LIBS += -L/opt/vc/lib -lmmal_core -lmmal_util -lmmal_vc_client -lbcm_host

#ifndef CORE_H
#define CORE_H

#include "decoder.h"
#include <QTime>
#include <QDebug>

extern "C"
{
#include "SDL2/SDL.h"
}

// Renders decoded YUV frames from a Decoder into an SDL window.
// Usage: construct, set the decoder via decoder(), then call show()
// (show() calls init() itself and then loops presenting frames).
class Core
{
private:
    // Window dimensions; copied from the decoder's codec context in init().
    int screen_w=0,screen_h=0;
    SDL_Window *screen;        // SDL window handle (created in init())
    SDL_Renderer* sdlRenderer; // renderer attached to `screen`
    SDL_Texture* sdlTexture;   // streaming IYUV texture the frames are uploaded into
    SDL_Rect sdlRect;          // destination rect; covers the whole window
    Decoder *m_decoder;        // non-owning; set via decoder()
    int m_t_num = -1;          // frames presented since the last FPS printout (-1 = not started)
    QTime m_t_fps;             // interval timer for the once-per-second FPS log
public:
    Core();
public:
    bool init();               // create window/renderer/texture; false on failure
    void show();               // blocking render loop
    void decoder(Decoder*);    // inject the frame source
};

#endif // CORE_H
#include "core.h"

// Nothing to set up at construction time — all SDL state is created in init().
Core::Core() = default;

void Core::decoder(Decoder *_decoder) {
    this->m_decoder = _decoder;
}

// Create the SDL window, renderer and streaming YUV texture sized to the
// decoder's video stream. Returns true on success, false on any failure.
// Precondition: m_decoder has been set and its codec context is open.
bool Core::init() {

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        printf("Could not initialize SDL - %s\n", SDL_GetError());
        // Bug fix: this function returns bool; the original `return -1`
        // converted to `true`, so callers saw failure as success.
        return false;
    }

    // Window is sized to the decoded video stream.
    screen_w = this->m_decoder->pCodecCtx->width;
    screen_h = this->m_decoder->pCodecCtx->height;

    // SDL 2.0 supports multiple windows.
    screen = SDL_CreateWindow("Simplest ffmpeg player's Window",
                              SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                              screen_w, screen_h,
                              SDL_WINDOW_OPENGL);
    if (!screen) {
        printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
        return false;
    }

    sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
    if (!sdlRenderer) {
        // Bug fix: the original never checked renderer creation.
        printf("SDL: could not create renderer - %s\n", SDL_GetError());
        return false;
    }

    // IYUV: Y + U + V (3 planes) — matches the 3-plane upload done in show().
    sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV,
                                   SDL_TEXTUREACCESS_STREAMING,
                                   this->m_decoder->pCodecCtx->width,
                                   this->m_decoder->pCodecCtx->height);
    if (!sdlTexture) {
        // Bug fix: the original never checked texture creation.
        printf("SDL: could not create texture - %s\n", SDL_GetError());
        return false;
    }

    // Destination rectangle covers the whole window.
    sdlRect.x = 0;
    sdlRect.y = 0;
    sdlRect.w = screen_w;
    sdlRect.h = screen_h;

    return true;
}

void Core::show() {

    this->init();

    while (1) {

    int i = m_decoder->m_frames->size();
    if (i > 0)
    {
        Frame front = m_decoder->m_frames->front();

#if 0
        SDL_UpdateTexture( sdlTexture, NULL, front.frame->data[0], front.frame->linesize[0] );
#else
        SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
                 front.frame->data[0], front.frame->linesize[0],
            front.frame->data[1], front.frame->linesize[1],
            front.frame->data[2], front.frame->linesize[2]);
#endif

        SDL_RenderClear( sdlRenderer );
        SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);
        SDL_RenderPresent( sdlRenderer );

    }
    if (i > 1)
    {
        m_decoder->m_frames->pop();
    }

    if (m_t_num == -1 || m_t_fps.elapsed() > 1000) {
        qreal fps = m_t_num * 1000.0 / m_t_fps.elapsed();
        // m_log->debug();
        qDebug(QString("-> FPS -> %1").arg(fps).toStdString().c_str());
        m_t_fps.start();
        m_t_num = 0;
    }
    m_t_num++;

    //Delay 40ms
    SDL_Delay(33);

    }
    SDL_Quit();
}

#ifndef DECODER_H
#define DECODER_H

#include <QDebug>
#include <QThread>
#include <QRunnable>
#include <QScopedPointer>
#include <QImage>
#include "frames.h"

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
}

// Background decoding task (run on a QThreadPool via QRunnable):
// opens a media file, finds the first video stream, decodes it
// (h264_mmal on Linux/Raspberry Pi, default decoder on Windows)
// and pushes cloned AVFrames into m_frames for the renderer.
class Decoder : public QRunnable
{
public:
    Decoder();
    ~Decoder();
    void run();        // QRunnable entry point: create() + decode loop
    bool create();     // open input and codec; false on failure
public:
    // FFmpeg state; public so the renderer can read width/height.
    AVFormatContext *pFormatCtx;
    AVCodecContext  *pCodecCtx;
    AVCodec     *pCodec;
    int videoindex;    // index of the video stream inside pFormatCtx
public:
    // Queue of decoded frames shared with the render thread.
    QScopedPointer<Frames> m_frames;
};

#endif // DECODER_H
#include "decoder.h"

// Allocate the frame queue that run() fills and the renderer drains.
Decoder::Decoder()
{
    m_frames.reset(new Frames);
}

// QScopedPointer releases the frame queue automatically.
Decoder::~Decoder() = default;

bool Decoder::create()
{
    av_register_all();
    avformat_network_init();

    pFormatCtx = avformat_alloc_context();

    if(avformat_open_input(&pFormatCtx, "E:/1111.mp4", nullptr, nullptr) != 0){
    printf("Couldn't open input stream.\n");
    return false;
    }

    if(avformat_find_stream_info(pFormatCtx, nullptr) < 0){
    printf("Couldn't find stream information.\n");
    return false;
    }

    videoindex = -1;
    for(int i = 0; i < pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        videoindex = i;
        break;
    }
    }
    if(videoindex == -1) {
    printf("Didn't find a video stream.\n");
    return false;
    }

    // --------------------------------- //
    AVCodec* currentCodec = nullptr;
    currentCodec = av_codec_next(currentCodec);
    while (currentCodec != nullptr) {
    if (av_codec_is_encoder(currentCodec)) {
        // m_log->debug(QString("Encoder -> %1 %2").arg(currentCodec->name).arg(currentCodec->long_name));
    }
    if (av_codec_is_decoder(currentCodec)) {
        // m_log->debug(QString("Decoder -> %1 %2").arg(currentCodec->name).arg(currentCodec->long_name));
    }
    currentCodec = av_codec_next(currentCodec);
    }
    // --------------------------------- //

    pCodecCtx = pFormatCtx->streams[videoindex]->codec;

#ifdef Q_OS_LINUX
    pCodec = avcodec_find_decoder_by_name("h264_mmal");
#endif

#ifdef Q_OS_WIN
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
#endif

    if(pCodec == nullptr) {
    printf("Codec not found.\n");
    return false;
    }

    if(avcodec_open2(pCodecCtx, pCodec, nullptr) < 0) {
    printf("Could not open codec.\n");
    return false;
    }

    // printf("--------------- File Information ----------------\n");
    // av_dump_format(pFormatCtx, 0, "PICT0023.AVI", 0);
    // printf("-------------------------------------------------\n");

    return true;
}

void Decoder::run()
{

    this->create();

    AVFrame *pFrame = av_frame_alloc();
    // AVFrame *pFrameYUV = av_frame_alloc();
    // AVFrame *pFrameRGB = av_frame_alloc();

    // int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
    // unsigned char *out_buffer = (unsigned char *)av_malloc(numBytes);
    // unsigned char *rgb_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height, 1));
    // av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
    // av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, rgb_buffer, AV_PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height, 1);
    // static struct SwsContext *img_convert_ctx_yuv = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, nullptr, nullptr, nullptr);
    // static struct SwsContext *img_convert_ctx_rgb = sws_getContext(pCodecCtx->width,pCodecCtx->height, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGB32, SWS_BICUBIC, nullptr, nullptr, nullptr);

    AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));

    int count = 0;
    while (av_read_frame(pFormatCtx, packet) >= 0) {
    if(packet->stream_index == videoindex) {
        for(;;)
        {
        int size = m_frames->size();
        if (size < 10) {
            break;
        }
        QThread::msleep(10);
        }
        // For decoding, call avcodec_send_packet() to give the decoder raw compressed data in an AVPacket.
        int frameFinished = avcodec_send_packet(pCodecCtx, packet);
        while (!frameFinished) {
        // For decoding, call avcodec_receive_frame(). On success, it will return an AVFrame containing uncompressed audio or video data.
        frameFinished = avcodec_receive_frame(pCodecCtx, pFrame);
        if (!frameFinished) {
            // m_log->debug(QString("%1 %2").arg(pFrame->format == AV_PIX_FMT_YUV420P ? "OK" : "NO").arg(count));
            // -----------------------------------------------
            // sws_scale(img_convert_ctx_yuv, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
            // sws_scale(img_convert_ctx_rgb, pFrameYUV->data, pFrameYUV->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
            // QImage img((uchar *)pFrameRGB->data[0], pCodecCtx->width, pCodecCtx->height, QImage::Format_ARGB32);
            m_frames->push(Frame(av_frame_clone(pFrame), count));
            count++;
            // -----------------------------------------------
            av_frame_unref(pFrame);
        }
        }
    }
    av_packet_unref(packet);
    }

    // av_free(rgb_buffer);
    // sws_freeContext(img_convert_ctx_rgb);
    // av_frame_free(&pFrameRGB);
    // av_free(out_buffer);
    // sws_freeContext(img_convert_ctx_yuv);
    // av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

}