浏览代码

V1.5.0

从零开始学习音视频编程技术(二十) 录屏软件开发之录屏生成MP4
叶海辉 6 年之前
父节点
当前提交
c3cfbdc1ce
共有 12 个文件被更改,包括 1205 次插入725 次删除
  1. 12 5
      VideoRecorder.pro
  2. 二进制
      in.pcm
  3. 0 4
      in.yuv
  4. 12 714
      src/main.cpp
  5. 67 0
      src/mainwindow.cpp
  6. 38 0
      src/mainwindow.h
  7. 68 0
      src/mainwindow.ui
  8. 636 0
      src/savevideofile.cpp
  9. 36 0
      src/savevideofile.h
  10. 266 0
      src/screenrecorder.cpp
  11. 68 0
      src/screenrecorder.h
  12. 2 2
      说明.txt

+ 12 - 5
VideoRecorder.pro

@@ -8,14 +8,20 @@ QT       += core gui
 
 greaterThan(QT_MAJOR_VERSION, 4): QT += widgets
 
-TARGET = VideoRecorder_7
+TARGET = VideoRecorder
 TEMPLATE = app
 
-SOURCES += src/main.cpp
 
-HEADERS  +=
+SOURCES += src/main.cpp\
+        src/mainwindow.cpp \
+    src/savevideofile.cpp \
+    src/screenrecorder.cpp
 
-FORMS    +=
+HEADERS  += src/mainwindow.h \
+    src/savevideofile.h \
+    src/screenrecorder.h
+
+FORMS    += src/mainwindow.ui
 
 INCLUDEPATH += $$PWD/lib/ffmpeg/include \
                $$PWD/lib/SDL2/include \
@@ -31,4 +37,5 @@ LIBS += $$PWD/lib/ffmpeg/lib/avcodec.lib \
         $$PWD/lib/ffmpeg/lib/swscale.lib \
         $$PWD/lib/SDL2/lib/x86/SDL2.lib
 
-
+LIBS += -lwinmm
+LIBS += -lws2_32

二进制
in.pcm


文件差异内容过多而无法显示
+ 0 - 4
in.yuv


+ 12 - 714
src/main.cpp

@@ -5,725 +5,23 @@
  * http://blog.yundiantech.com/
  */
 
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <math.h>
+#include "mainwindow.h"
+#include <QApplication>
 
-#include <QDebug>
-
-extern"C"
-{
-    #include "libavutil/mathematics.h"
-    #include "libavformat/avformat.h"
-    #include "libswscale/swscale.h"
-
-    #include "SDL.h"
-    #include "SDL_thread.h"
-    #include "SDL_events.h"
-
-}
-
-#define WIDTH  176
-#define HEIGHT 144
-
-#define PCM_FILE_NAME "in.pcm"
-#define YUV_FILE_NAME "in.yuv"
-#define OUT_VIDEO_FILENAME "out.mp4"
-
-uint8_t picture_buf[WIDTH*HEIGHT*4];
-bool isStop = false;
-
-static float t, tincr, tincr2;
-static int16_t *samples;
-static uint8_t *audio_outbuf;
-static int audio_outbuf_size;
-int audio_input_frame_size;
-
-#define STREAM_DURATION   200.0  //视频总长度单位秒
-#define STREAM_FRAME_RATE 25 /* 25 images/s */
-#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
-#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
-
-/**************************************************************/
-/* audio output */
-
-struct BufferDataNode
-{
-    uint8_t * buffer;
-    int bufferSize;
-    BufferDataNode * next;
-};
-
-SDL_mutex *videoMutex = SDL_CreateMutex();
-BufferDataNode * videoDataQueneHead = NULL;
-BufferDataNode * videoDataQueneTail = NULL;
-
-SDL_mutex *audioMutex = SDL_CreateMutex();
-BufferDataNode * AudioDataQueneHead = NULL;
-BufferDataNode * AudioDataQueneTail = NULL;
-
-void videoDataQuene_Input(uint8_t * buffer,int size)
-{
-    BufferDataNode * node = (BufferDataNode*)malloc(sizeof(BufferDataNode));
-    node->buffer = (uint8_t *)malloc(size);
-    node->bufferSize = size;
-    node->next = NULL;
-
-    memcpy(node->buffer,buffer,size);
-
-    SDL_LockMutex(videoMutex);
-
-    if (videoDataQueneHead == NULL)
-    {
-        videoDataQueneHead = node;
-    }
-    else
-    {
-        videoDataQueneTail->next = node;
-    }
-
-    videoDataQueneTail = node;
-
-    SDL_UnlockMutex(videoMutex);
-}
-
-static BufferDataNode *videoDataQuene_get()
-{
-    BufferDataNode * node = NULL;
-
-    SDL_LockMutex(videoMutex);
-
-    if (videoDataQueneHead != NULL)
-    {
-        node = videoDataQueneHead;
-
-        if (videoDataQueneTail == videoDataQueneHead)
-        {
-            videoDataQueneTail = NULL;
-        }
-
-        videoDataQueneHead = videoDataQueneHead->next;
-    }
-
-    SDL_UnlockMutex(videoMutex);
-
-    return node;
-}
-
-
-void audioDataQuene_Input(uint8_t * buffer,int size)
-{
-    BufferDataNode * node = (BufferDataNode*)malloc(sizeof(BufferDataNode));
-    node->buffer = (uint8_t *)malloc(size);
-    node->bufferSize = size;
-    node->next = NULL;
-
-    memcpy(node->buffer,buffer,size);
-
-    SDL_LockMutex(audioMutex);
-
-
-
-    if (AudioDataQueneHead == NULL)
-    {
-        AudioDataQueneHead = node;
-    }
-    else
-    {
-        AudioDataQueneTail->next = node;
-    }
-
-    AudioDataQueneTail = node;
-
-    SDL_UnlockMutex(audioMutex);
-}
-
-static BufferDataNode *audioDataQuene_get()
-{
-    BufferDataNode * node = NULL;
-
-    SDL_LockMutex(audioMutex);
-
-    if (AudioDataQueneHead != NULL)
-    {
-        node = AudioDataQueneHead;
-
-        if (AudioDataQueneTail == AudioDataQueneHead)
-        {
-            AudioDataQueneTail = NULL;
-        }
-
-        AudioDataQueneHead = AudioDataQueneHead->next;
-    }
-
-    SDL_UnlockMutex(audioMutex);
-
-    return node;
-}
-
-/*
- * add an audio output stream
- */
-static AVStream *add_audio_stream(AVFormatContext *oc, AVCodecID codec_id)
-{
-    AVCodecContext *c;
-    AVStream *st;
-
-    st = avformat_new_stream(oc, NULL);
-    if (!st) {
-        fprintf(stderr, "Could not alloc stream\n");
-        exit(1);
-    }
-    st->id = 1;
-
-    c = st->codec;
-    c->codec_id = codec_id;
-    c->codec_type = AVMEDIA_TYPE_AUDIO;
-
-    /* put sample parameters */
-    c->sample_fmt = AV_SAMPLE_FMT_S16;
-    c->bit_rate = 44100;
-    c->sample_rate = 44100;
-    c->channels = 2;
-
-    // some formats want stream headers to be separate
-    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
-        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
-    return st;
-}
-
-static void open_audio(AVFormatContext *oc, AVStream *st)
-{
-    AVCodecContext *c;
-    AVCodec *codec;
-
-    c = st->codec;
-
-    /* find the audio encoder */
-    codec = avcodec_find_encoder(c->codec_id);
-    if (!codec) {
-        fprintf(stderr, "codec not found\n");
-        exit(1);
-    }
-
-    /* open it */
-    if (avcodec_open2(c, codec,NULL) < 0) {
-        fprintf(stderr, "could not open codec\n");
-        exit(1);
-    }
-
-    /* init signal generator */
-    t = 0;
-    tincr = 2 * M_PI * 110.0 / c->sample_rate;
-    /* increment frequency by 110 Hz per second */
-    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
-
-    audio_outbuf_size = 10000;
-    audio_outbuf = (uint8_t *)av_malloc(audio_outbuf_size);
-
-
-    if (c->frame_size <= 1) {
-        audio_input_frame_size = audio_outbuf_size / c->channels;
-        switch(st->codec->codec_id) {
-        case CODEC_ID_PCM_S16LE:
-        case CODEC_ID_PCM_S16BE:
-        case CODEC_ID_PCM_U16LE:
-        case CODEC_ID_PCM_U16BE:
-            audio_input_frame_size >>= 1;
-            break;
-        default:
-            break;
-        }
-    } else {
-        audio_input_frame_size = c->frame_size;
-    }
-    samples = (int16_t *)av_malloc(audio_input_frame_size * 2 * c->channels);
-}
-
-static void write_audio_frame(AVFormatContext *oc, AVStream *st)
-{
-    AVCodecContext *c;
-    AVPacket pkt;
-    av_init_packet(&pkt);
-
-    c = st->codec;
-
-    BufferDataNode *node = audioDataQuene_get();
-
-    if (node == NULL)
-    {
-        SDL_Delay(1); //延时1ms
-        return;
-    }
-    else
-    {
-        memcpy(samples,node->buffer, node->bufferSize);
-
-        free(node->buffer);
-        free(node);
-    }
-
-//    fread(samples, 1, audio_input_frame_size*4, pcmInFp);
-
-    pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
-
-    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
-        pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
-    pkt.flags |= AV_PKT_FLAG_KEY;
-    pkt.stream_index = st->index;
-    pkt.data = audio_outbuf;
-
-    /* write the compressed frame in the media file */
-    if (av_interleaved_write_frame(oc, &pkt) != 0) {
-        fprintf(stderr, "Error while writing audio frame\n");
-        exit(1);
-    }
-}
-
-static void close_audio(AVFormatContext *oc, AVStream *st)
-{
-    avcodec_close(st->codec);
-
-    av_free(samples);
-    av_free(audio_outbuf);
-}
-
-/**************************************************************/
-/* video output */
-
-static AVFrame *picture, *tmp_picture;
-static uint8_t *video_outbuf;
-static int video_outbuf_size;
-
-/* add a video output stream */
-static AVStream *add_video_stream(AVFormatContext *oc, AVCodecID codec_id)
-{
-    AVCodecContext *c;
-    AVStream *st;
-    AVCodec *codec;
-
-    st = avformat_new_stream(oc, NULL);
-    if (!st) {
-        fprintf(stderr, "Could not alloc stream\n");
-        exit(1);
-    }
-
-    c = st->codec;
-
-    /* find the video encoder */
-    codec = avcodec_find_encoder(codec_id);
-    if (!codec) {
-        fprintf(stderr, "codec not found\n");
-        exit(1);
-    }
-    avcodec_get_context_defaults3(c, codec);
-
-    c->codec_id = codec_id;
-
-    /* put sample parameters */
-    c->bit_rate = 400000;
-    /* resolution must be a multiple of two */
-    c->width = WIDTH;
-    c->height = HEIGHT;
-    /* time base: this is the fundamental unit of time (in seconds) in terms
-       of which frame timestamps are represented. for fixed-fps content,
-       timebase should be 1/framerate and timestamp increments should be
-       identically 1. */
-    c->time_base.den = STREAM_FRAME_RATE;
-    c->time_base.num = 1;
-    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
-    c->pix_fmt = STREAM_PIX_FMT;
-    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
-        /* just for testing, we also add B frames */
-        c->max_b_frames = 2;
-    }
-    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
-        /* Needed to avoid using macroblocks in which some coeffs overflow.
-           This does not happen with normal video, it just happens here as
-           the motion of the chroma plane does not match the luma plane. */
-        c->mb_decision=2;
-    }
-    // some formats want stream headers to be separate
-    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
-        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
-    return st;
-}
-
-static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
-{
-    AVFrame *picture;
-    uint8_t *picture_buf;
-    int size;
-
-    picture = avcodec_alloc_frame();
-    if (!picture)
-        return NULL;
-    size = avpicture_get_size(pix_fmt, width, height);
-    picture_buf = (uint8_t *)av_malloc(size);
-    if (!picture_buf) {
-        av_free(picture);
-        return NULL;
-    }
-    avpicture_fill((AVPicture *)picture, picture_buf,
-                   pix_fmt, width, height);
-    return picture;
-}
-
-static void open_video(AVFormatContext *oc, AVStream *st)
-{
-    AVCodec *codec;
-    AVCodecContext *c;
-
-    c = st->codec;
-
-    /* find the video encoder */
-    codec = avcodec_find_encoder(c->codec_id);
-    if (!codec) {
-        fprintf(stderr, "codec not found\n");
-        exit(1);
-    }
-
-    /* open the codec */
-    if (avcodec_open2(c, codec, NULL) < 0) {
-        fprintf(stderr, "could not open codec\n");
-        exit(1);
-    }
-
-    video_outbuf = NULL;
-    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
-        video_outbuf_size = 200000;
-        video_outbuf = (uint8_t *)av_malloc(video_outbuf_size);
-    }
-
-    /* allocate the encoded raw picture */
-    picture = alloc_picture(c->pix_fmt, c->width, c->height);
-    if (!picture) {
-        fprintf(stderr, "Could not allocate picture\n");
-        exit(1);
-    }
-
-    /* if the output format is not YUV420P, then a temporary YUV420P
-       picture is needed too. It is then converted to the required
-       output format */
-    tmp_picture = NULL;
-    if (c->pix_fmt != PIX_FMT_YUV420P) {
-        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
-        if (!tmp_picture) {
-            fprintf(stderr, "Could not allocate temporary picture\n");
-            exit(1);
-        }
-    }
-}
-
-static void write_video_frame(AVFormatContext *oc, AVStream *st)
-{
-
-    int out_size, ret;
-    AVCodecContext *c;
-
-    c = st->codec;
-
-    BufferDataNode *node = videoDataQuene_get();
-
-    if (node == NULL)
-    {
-        SDL_Delay(1); //延时1ms
-        return;
-    }
-    else
-    {
-        int y_size = c->width * c->height;
-        memcpy(picture_buf,node->buffer, y_size*3/2);
-
-        free(node->buffer);
-        free(node);
-
-        picture->data[0] = picture_buf;  // 亮度Y
-        picture->data[1] = picture_buf+ y_size;  // U
-        picture->data[2] = picture_buf+ y_size*5/4; // V
-    }
-
-    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
-        /* raw video case. The API will change slightly in the near
-           future for that. */
-        AVPacket pkt;
-        av_init_packet(&pkt);
-
-        pkt.flags |= AV_PKT_FLAG_KEY;
-        pkt.stream_index = st->index;
-        pkt.data = (uint8_t *)picture;
-        pkt.size = sizeof(AVPicture);
-        ret = av_interleaved_write_frame(oc, &pkt);
-    } else {
-        /* encode the image */
-        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
-        /* if zero size, it means the image was buffered */
-        if (out_size > 0) {
-            AVPacket pkt;
-            av_init_packet(&pkt);
-
-            if (c->coded_frame->pts != AV_NOPTS_VALUE)
-                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
-            if(c->coded_frame->key_frame)
-                pkt.flags |= AV_PKT_FLAG_KEY;
-            pkt.stream_index = st->index;
-
-            pkt.data = video_outbuf;
-            pkt.size = out_size;
-
-            /* write the compressed frame in the media file */
-            ret = av_interleaved_write_frame(oc, &pkt);
-
-        } else {
-            ret = 0;
-        }
-    }
-
-    if (ret != 0) {
-        fprintf(stderr, "Error while writing video frame\n");
-        exit(1);
-    }
-
-}
-
-static void close_video(AVFormatContext *oc, AVStream *st)
-{
-    avcodec_close(st->codec);
-    av_free(picture->data[0]);
-    av_free(picture);
-    if (tmp_picture) {
-        av_free(tmp_picture->data[0]);
-        av_free(tmp_picture);
-    }
-    av_free(video_outbuf);
-}
-
-int encode_thread(void *arg)
-{
-
-    const char *filename;
-    AVOutputFormat *fmt;
-    AVFormatContext *oc;
-    AVStream *audio_st, *video_st;
-    double audio_pts, video_pts;
-    int i;
-
-    /* initialize libavcodec, and register all codecs and formats */
-    av_register_all();
-
-    filename = OUT_VIDEO_FILENAME;
-
-    /* allocate the output media context */
-    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
-    if (!oc) {
-        printf("Could not deduce output format from file extension: using MPEG.\n");
-        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
-    }
-    if (!oc) {
-        return 1;
-    }
-    fmt = oc->oformat;
-
-    /* add the audio and video streams using the default format codecs
-       and initialize the codecs */
-    video_st = NULL;
-    audio_st = NULL;
-    if (fmt->video_codec != CODEC_ID_NONE) {
-        video_st = add_video_stream(oc, fmt->video_codec);
-    }
-    if (fmt->audio_codec != CODEC_ID_NONE) {
-        audio_st = add_audio_stream(oc, fmt->audio_codec);
-    }
-
-    av_dump_format(oc, 0, filename, 1);
-
-    /* now that all the parameters are set, we can open the audio and
-       video codecs and allocate the necessary encode buffers */
-    if (video_st)
-        open_video(oc, video_st);
-    if (audio_st)
-        open_audio(oc, audio_st);
-
-    /* open the output file, if needed */
-    if (!(fmt->flags & AVFMT_NOFILE)) {
-        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
-            fprintf(stderr, "Could not open '%s'\n", filename);
-            return 1;
-        }
-    }
-
-    /* write the stream header, if any */
-//    av_write_header(oc);
-    avformat_write_header(oc,NULL);
-
-    picture->pts = 0;
-    while(!isStop)
-    {
-        /* compute current audio and video time */
-        if (audio_st)
-            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
-        else
-            audio_pts = 0.0;
-
-        if (video_st)
-            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
-        else
-            video_pts = 0.0;
-
-        qDebug()<<audio_pts<<video_pts;
-
-        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
-            (!video_st || video_pts >= STREAM_DURATION))
-            break;
-
-        /* write interleaved audio and video frames */
-        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
-            write_audio_frame(oc, audio_st);
-        } else {
-            write_video_frame(oc, video_st);
-            picture->pts++;
-        }
-    }
-
-
-
-    av_write_trailer(oc);
-
-    /* close each codec */
-    if (video_st)
-        close_video(oc, video_st);
-    if (audio_st)
-        close_audio(oc, audio_st);
-
-    /* free the streams */
-    for(i = 0; i < oc->nb_streams; i++) {
-        av_freep(&oc->streams[i]->codec);
-        av_freep(&oc->streams[i]);
-    }
-
-    if (!(fmt->flags & AVFMT_NOFILE)) {
-        /* close the output file */
-        avio_close(oc->pb);
-    }
-
-    /* free the stream */
-    av_free(oc);
-
-    return 0;
-}
-
-bool startEncode()
-{
-    SDL_Thread *encodeThreadId = SDL_CreateThread(encode_thread, "parse_thread", NULL);
-
-    if (!encodeThreadId)
-    {
-        return false;
-    }
-
-    return true;
-}
-
-
-int putVideoThread(void*arg);
-int putAudioThread(void*arg);
-
-FILE *pcmInFp = fopen(PCM_FILE_NAME,"rb");
-FILE *yuv420InFp = fopen(YUV_FILE_NAME,"rb");
+#include <QTextCodec>
 
 #undef main
-
-int main()
+int main(int argc, char *argv[])
 {
-    if (pcmInFp == NULL || yuv420InFp == NULL)
-    {
-        ///请把pcm和yuv文件拷贝到构建目录下
-        fprintf(stderr,"please copy the yuv and pcm file to the Qt Build folder!\n");
-        exit(-1);
-    }
-
-    startEncode(); //开启编码并写入文件的线程
-    SDL_CreateThread(putAudioThread, "parse_thread", NULL); //开启读取PCM的线程
-    SDL_CreateThread(putVideoThread, "parse_thread", NULL); //开启读取YUV的线程
-
-    while(1); //主线程停住等待
-
-    return 0;
-}
-
-
-
-///读取输入文件相关 - Begin
-int putVideoThread(void*arg)
-{
-    int y_size = WIDTH * HEIGHT;
-
-    while(1)
-    {
-
-        char buffer[WIDTH*HEIGHT*3];
-
-        int size = fread(buffer, 1, y_size*3/2, yuv420InFp);
-
-        if (size < 0){
-          printf("Failed to read YUV data! 文件读取错误\n");
-          return 0;
-        }
-        else if(feof(yuv420InFp))
-        {
-            qDebug()<<"read video finished!";
-
-            fclose(yuv420InFp);
-            yuv420InFp = fopen(YUV_FILE_NAME,"rb");
-
-            continue;
-        }
-
-        if (size == y_size*3/2)
-        {
-            videoDataQuene_Input((uint8_t*)buffer,y_size*3/2);
-        }
-
-        SDL_Delay(10);
-    }
-
-    return 0;
-}
-
-
-int putAudioThread(void*arg)
-{
-    while(1)
-    {
-
-        char buffer[WIDTH*HEIGHT*3];
-        int size = fread(buffer, 1, audio_input_frame_size*4, pcmInFp);
-
-        if (size < 0){
-          printf("Failed to read YUV data! 文件读取错误\n");
-          return 0;
-        }
-        else if(feof(pcmInFp))
-        {
-            qDebug()<<"read audio finished!";
-
-            fclose(pcmInFp);
-            pcmInFp = fopen(PCM_FILE_NAME,"rb");
-
-            continue;
-        }
+    QApplication a(argc, argv);
 
-        if (size == audio_input_frame_size*4)
-        {
-            audioDataQuene_Input((uint8_t*)buffer,audio_input_frame_size*4);
-        }
+    QTextCodec *codec = QTextCodec::codecForName("UTF-8");
+    QTextCodec::setCodecForLocale(codec);
+    QTextCodec::setCodecForCStrings(codec);
+    QTextCodec::setCodecForTr(codec);
 
-        SDL_Delay(1);
-    }
+    MainWindow w;
+    w.show();
 
-    return 0;
+    return a.exec();
 }
-///读取输入文件相关 - End

+ 67 - 0
src/mainwindow.cpp

@@ -0,0 +1,67 @@
+
+/**
+ * 叶海辉
+ * QQ群121376426
+ * http://blog.yundiantech.com/
+ */
+
+#include "mainwindow.h"
+#include "ui_mainwindow.h"
+
+#include <QDebug>
+#include <QMessageBox>
+
+MainWindow::MainWindow(QWidget *parent) :
+    QMainWindow(parent),
+    ui(new Ui::MainWindow)
+{
+    ui->setupUi(this);
+
+
+    int devNums = waveInGetNumDevs();
+
+    for(int i=0;i<devNums;i++)
+    {
+        WAVEINCAPSW p;
+        waveInGetDevCaps(i,&p,sizeof(WAVEINCAPS));
+        ui->comboBox_audiodeviceList->addItem(QString::fromWCharArray(p.szPname));
+    }
+
+    m_screenRecorder = new ScreenRecorder;
+
+    connect(ui->pushButton_start,SIGNAL(clicked()),this,SLOT(slotBtnClick()));
+    connect(ui->pushButton_stop,SIGNAL(clicked()),this,SLOT(slotBtnClick()));
+}
+
+MainWindow::~MainWindow()
+{
+    delete ui;
+}
+
+
+void MainWindow::slotBtnClick()
+{
+    if (QObject::sender() == ui->pushButton_start)
+    {
+
+        ErroCode code = m_screenRecorder->init(ui->comboBox_audiodeviceList->currentText());
+
+        if (code == SUCCEED)
+        {
+            m_screenRecorder->startRecord();
+            ui->pushButton_start->setEnabled(false);
+            ui->pushButton_stop->setEnabled(true);
+//            SDL_CreateThread(putAudioThread, "parse_thread", NULL);
+        }
+        else
+        {
+            QMessageBox::critical(NULL,"提示","出错了,初始化失败。");
+        }
+    }
+    else if (QObject::sender() == ui->pushButton_stop)
+    {
+        m_screenRecorder->stopRecord();
+        ui->pushButton_stop->setEnabled(false);
+        ui->pushButton_start->setEnabled(true);
+    }
+}

+ 38 - 0
src/mainwindow.h

@@ -0,0 +1,38 @@
+
+/**
+ * 叶海辉
+ * QQ群121376426
+ * http://blog.yundiantech.com/
+ */
+
+#ifndef MAINWINDOW_H
+#define MAINWINDOW_H
+
+#include <QMainWindow>
+
+#include "screenrecorder.h"
+
+#include <windows.h>
+
+namespace Ui {
+class MainWindow;
+}
+
+class MainWindow : public QMainWindow
+{
+    Q_OBJECT
+
+public:
+    explicit MainWindow(QWidget *parent = 0);
+    ~MainWindow();
+
+private:
+    Ui::MainWindow *ui;
+
+    ScreenRecorder *m_screenRecorder;
+
+private slots:
+    void slotBtnClick();
+};
+
+#endif // MAINWINDOW_H

+ 68 - 0
src/mainwindow.ui

@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>MainWindow</class>
+ <widget class="QMainWindow" name="MainWindow">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>383</width>
+    <height>185</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>MainWindow</string>
+  </property>
+  <widget class="QWidget" name="centralWidget">
+   <widget class="QPushButton" name="pushButton_start">
+    <property name="geometry">
+     <rect>
+      <x>30</x>
+      <y>90</y>
+      <width>131</width>
+      <height>41</height>
+     </rect>
+    </property>
+    <property name="text">
+     <string>start</string>
+    </property>
+   </widget>
+   <widget class="QPushButton" name="pushButton_stop">
+    <property name="geometry">
+     <rect>
+      <x>200</x>
+      <y>90</y>
+      <width>121</width>
+      <height>41</height>
+     </rect>
+    </property>
+    <property name="text">
+     <string>stop</string>
+    </property>
+   </widget>
+   <widget class="QComboBox" name="comboBox_audiodeviceList">
+    <property name="geometry">
+     <rect>
+      <x>20</x>
+      <y>20</y>
+      <width>332</width>
+      <height>41</height>
+     </rect>
+    </property>
+   </widget>
+  </widget>
+  <widget class="QMenuBar" name="menuBar">
+   <property name="geometry">
+    <rect>
+     <x>0</x>
+     <y>0</y>
+     <width>383</width>
+     <height>23</height>
+    </rect>
+   </property>
+  </widget>
+ </widget>
+ <layoutdefault spacing="6" margin="11"/>
+ <resources/>
+ <connections/>
+</ui>

+ 636 - 0
src/savevideofile.cpp

@@ -0,0 +1,636 @@
+
+/**
+ * 叶海辉
+ * QQ群121376426
+ * http://blog.yundiantech.com/
+ */
+
+#include "savevideofile.h"
+
+uint8_t picture_buf[2000*2000*4];
+bool isStop = false;
+
+static float t, tincr, tincr2;
+static int16_t *samples;
+static uint8_t *audio_outbuf;
+static int audio_outbuf_size;
+int audio_input_frame_size;
+
+int WIDTH;
+int HEIGHT;
+
+
+/**************************************************************/
+/* audio output */
+
+
+struct BufferDataNode
+{
+    uint8_t * buffer;
+    int bufferSize;
+    BufferDataNode * next;
+};
+
+SDL_mutex *videoMutex = SDL_CreateMutex();
+BufferDataNode * videoDataQueneHead = NULL;
+BufferDataNode * videoDataQueneTail = NULL;
+
+SDL_mutex *audioMutex = SDL_CreateMutex();
+BufferDataNode * AudioDataQueneHead = NULL;
+BufferDataNode * AudioDataQueneTail = NULL;
+
+void videoDataQuene_Input(uint8_t * buffer,int size)
+{
+    BufferDataNode * node = (BufferDataNode*)malloc(sizeof(BufferDataNode));
+//    node->buffer = (uint8_t *)malloc(size);
+    node->bufferSize = size;
+    node->next = NULL;
+
+    node->buffer = buffer;
+//    memcpy(node->buffer,buffer,size);
+
+    SDL_LockMutex(videoMutex);
+
+    if (videoDataQueneHead == NULL)
+    {
+        videoDataQueneHead = node;
+    }
+    else
+    {
+        videoDataQueneTail->next = node;
+    }
+
+    videoDataQueneTail = node;
+
+    SDL_UnlockMutex(videoMutex);
+}
+
+static BufferDataNode *videoDataQuene_get()
+{
+    BufferDataNode * node = NULL;
+
+    SDL_LockMutex(videoMutex);
+
+    if (videoDataQueneHead != NULL)
+    {
+        node = videoDataQueneHead;
+
+        if (videoDataQueneTail == videoDataQueneHead)
+        {
+            videoDataQueneTail = NULL;
+        }
+
+        videoDataQueneHead = videoDataQueneHead->next;
+    }
+
+    SDL_UnlockMutex(videoMutex);
+
+    return node;
+}
+
+void audioDataQuene_Input(uint8_t * buffer,int size)
+{
+    BufferDataNode * node = (BufferDataNode*)malloc(sizeof(BufferDataNode));
+    node->buffer = (uint8_t *)malloc(size);
+    node->bufferSize = size;
+    node->next = NULL;
+
+    memcpy(node->buffer,buffer,size);
+
+    SDL_LockMutex(audioMutex);
+
+    if (AudioDataQueneHead == NULL)
+    {
+        AudioDataQueneHead = node;
+    }
+    else
+    {
+        AudioDataQueneTail->next = node;
+    }
+
+    AudioDataQueneTail = node;
+
+    SDL_UnlockMutex(audioMutex);
+}
+
+static BufferDataNode *audioDataQuene_get()
+{
+    BufferDataNode * node = NULL;
+
+    SDL_LockMutex(audioMutex);
+
+    if (AudioDataQueneHead != NULL)
+    {
+        node = AudioDataQueneHead;
+
+        if (AudioDataQueneTail == AudioDataQueneHead)
+        {
+            AudioDataQueneTail = NULL;
+        }
+
+        AudioDataQueneHead = AudioDataQueneHead->next;
+    }
+
+    SDL_UnlockMutex(audioMutex);
+
+    return node;
+}
+
+/*
+ * add an audio output stream
+ */
+static AVStream *add_audio_stream(AVFormatContext *oc, AVCodecID codec_id)
+{
+    AVCodecContext *c;
+    AVStream *st;
+
+    st = avformat_new_stream(oc, NULL);
+    if (!st) {
+        fprintf(stderr, "Could not alloc stream\n");
+        exit(1);
+    }
+    st->id = 1;
+
+    c = st->codec;
+    c->codec_id = codec_id;
+    c->codec_type = AVMEDIA_TYPE_AUDIO;
+
+    /* put sample parameters */
+    c->sample_fmt = AV_SAMPLE_FMT_S16;
+    c->bit_rate = 44100;
+    c->sample_rate = 44100;
+    c->channels = 2;
+
+    // some formats want stream headers to be separate
+    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
+        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+
+    return st;
+}
+
+static void open_audio(AVFormatContext *oc, AVStream *st)
+{
+    AVCodecContext *c;
+    AVCodec *codec;
+
+    c = st->codec;
+
+    /* find the audio encoder */
+    codec = avcodec_find_encoder(c->codec_id);
+    if (!codec) {
+        fprintf(stderr, "codec not found\n");
+        exit(1);
+    }
+
+    /* open it */
+    if (avcodec_open2(c, codec,NULL) < 0) {
+        fprintf(stderr, "could not open codec\n");
+        exit(1);
+    }
+
+    /* init signal generator */
+    t = 0;
+    tincr = 2 * M_PI * 110.0 / c->sample_rate;
+    /* increment frequency by 110 Hz per second */
+    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
+
+    audio_outbuf_size = 10000;
+    audio_outbuf = (uint8_t *)av_malloc(audio_outbuf_size);
+
+
+    if (c->frame_size <= 1) {
+        audio_input_frame_size = audio_outbuf_size / c->channels;
+        switch(st->codec->codec_id) {
+        case CODEC_ID_PCM_S16LE:
+        case CODEC_ID_PCM_S16BE:
+        case CODEC_ID_PCM_U16LE:
+        case CODEC_ID_PCM_U16BE:
+            audio_input_frame_size >>= 1;
+            break;
+        default:
+            break;
+        }
+    } else {
+        audio_input_frame_size = c->frame_size;
+    }
+    samples = (int16_t *)av_malloc(audio_input_frame_size * 2 * c->channels);
+}
+
+static void write_audio_frame(AVFormatContext *oc, AVStream *st)
+{
+    AVCodecContext *c;
+    AVPacket pkt;
+    av_init_packet(&pkt);
+
+    c = st->codec;
+
+    BufferDataNode *node = audioDataQuene_get();
+
+    if (node == NULL)
+    {
+        SDL_Delay(1); //延时1ms
+        return;
+    }
+    else
+    {
+        memcpy(samples,node->buffer, node->bufferSize);
+
+        free(node->buffer);
+        free(node);
+    }
+
+//    fread(samples, 1, audio_input_frame_size*4, pcmInFp);
+
+    pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
+
+    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
+        pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
+    pkt.flags |= AV_PKT_FLAG_KEY;
+    pkt.stream_index = st->index;
+    pkt.data = audio_outbuf;
+
+    /* write the compressed frame in the media file */
+    if (av_interleaved_write_frame(oc, &pkt) != 0) {
+        fprintf(stderr, "Error while writing audio frame\n");
+        exit(1);
+    }
+}
+
+static void close_audio(AVFormatContext *oc, AVStream *st)
+{
+    avcodec_close(st->codec);
+
+    av_free(samples);
+    av_free(audio_outbuf);
+}
+
+/**************************************************************/
+/* video output */
+
+static AVFrame *picture, *tmp_picture;
+static uint8_t *video_outbuf;
+static int video_outbuf_size;
+
+/* add a video output stream */
+static AVStream *add_video_stream(AVFormatContext *oc, AVCodecID codec_id)
+{
+    AVCodecContext *c;
+    AVStream *st;
+    AVCodec *codec;
+
+    st = avformat_new_stream(oc, NULL);
+    if (!st) {
+        fprintf(stderr, "Could not alloc stream\n");
+        exit(1);
+    }
+
+    c = st->codec;
+
+    /* find the video encoder */
+    codec = avcodec_find_encoder(codec_id);
+    if (!codec) {
+        fprintf(stderr, "codec not found\n");
+        exit(1);
+    }
+    avcodec_get_context_defaults3(c, codec);
+
+    c->codec_id = codec_id;
+
+    /* put sample parameters */
+    c->bit_rate = 400000;
+    /* resolution must be a multiple of two */
+    c->width = WIDTH;
+    c->height = HEIGHT;
+    /* time base: this is the fundamental unit of time (in seconds) in terms
+       of which frame timestamps are represented. for fixed-fps content,
+       timebase should be 1/framerate and timestamp increments should be
+       identically 1. */
+    c->time_base.den = 10;
+    c->time_base.num = 1;
+    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
+    c->pix_fmt = PIX_FMT_YUV420P;
+    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
+        /* just for testing, we also add B frames */
+        c->max_b_frames = 2;
+    }
+    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
+        /* Needed to avoid using macroblocks in which some coeffs overflow.
+           This does not happen with normal video, it just happens here as
+           the motion of the chroma plane does not match the luma plane. */
+        c->mb_decision=2;
+    }
+    // some formats want stream headers to be separate
+    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
+        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+
+    return st;
+}
+
+/* Allocate an AVFrame plus a pixel buffer for the given format/size and
+ * wire the frame's data/linesize pointers into that buffer.
+ * Returns NULL on allocation failure. Caller owns both the frame and
+ * picture->data[0] (see close_video for the matching frees). */
+static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
+{
+    AVFrame *picture;
+    uint8_t *picture_buf;  /* shadows the file-scope picture_buf on purpose */
+    int size;
+
+    picture = avcodec_alloc_frame();
+    if (!picture)
+        return NULL;
+    size = avpicture_get_size(pix_fmt, width, height);
+    picture_buf = (uint8_t *)av_malloc(size);
+    if (!picture_buf) {
+        av_free(picture);
+        return NULL;
+    }
+    avpicture_fill((AVPicture *)picture, picture_buf,
+                   pix_fmt, width, height);
+    return picture;
+}
+
+/* Open the H.264 encoder for the stream created by add_video_stream and
+ * allocate the file-scope encode buffers (video_outbuf, picture,
+ * tmp_picture). Exits the process on any failure. */
+static void open_video(AVFormatContext *oc, AVStream *st)
+{
+    AVCodec *codec;
+    AVCodecContext *c;
+
+    c = st->codec;
+
+    /* x264 options: "superfast" preset and zero-latency tuning so frames
+       are emitted immediately (real-time encoding). */
+    AVDictionary *param = 0;
+    av_dict_set(&param, "preset", "superfast", 0);
+    av_dict_set(&param, "tune", "zerolatency", 0);
+
+    /* find the video encoder */
+    codec = avcodec_find_encoder(c->codec_id);
+    if (!codec){
+      printf("Can not find video encoder!\n");
+      exit(1);
+    }
+
+    /* BUG FIX: '<' binds tighter than '=', so the original
+       "if (ret = avcodec_open2(...) < 0)" assigned the *comparison result*
+       to ret instead of the ffmpeg error code. Open first, then test. */
+    int ret = avcodec_open2(c, codec, &param);
+    if (ret < 0){
+      qDebug()<<("Failed to open video encoder!\n")<<ret;
+      exit(1);
+    }
+
+    /* Buffer for encoded packets; only needed when the muxer takes
+       compressed data rather than raw pictures. */
+    video_outbuf = NULL;
+    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
+        video_outbuf_size = 200000;
+        video_outbuf = (uint8_t *)av_malloc(video_outbuf_size);
+    }
+
+    /* allocate the encoded raw picture */
+    picture = alloc_picture(c->pix_fmt, c->width, c->height);
+    if (!picture) {
+        fprintf(stderr, "Could not allocate picture\n");
+        exit(1);
+    }
+
+    /* if the output format is not YUV420P, then a temporary YUV420P
+       picture is needed too. It is then converted to the required
+       output format */
+    tmp_picture = NULL;
+    if (c->pix_fmt != PIX_FMT_YUV420P) {
+        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
+        if (!tmp_picture) {
+            fprintf(stderr, "Could not allocate temporary picture\n");
+            exit(1);
+        }
+    }
+}
+
+/* Pop one YUV420P frame from the video queue, encode it, and write the
+ * resulting packet to the muxer. Uses the file-scope globals picture,
+ * picture_buf, video_outbuf and the deprecated avcodec_encode_video API.
+ * Returns without doing anything (after a 1 ms sleep) when the queue is
+ * empty. Exits the process on a write error. */
+static void write_video_frame(AVFormatContext *oc, AVStream *st)
+{
+
+    int out_size, ret;
+    AVCodecContext *c;
+
+    c = st->codec;
+
+    BufferDataNode *node = videoDataQuene_get();
+
+    if (node == NULL)
+    {
+        SDL_Delay(1); // sleep 1 ms so an empty queue doesn't busy-spin
+        return;
+    }
+    else
+    {
+        int y_size = c->width * c->height;
+
+        /* queue node holds a packed YUV420P frame: Y (y_size) + U + V */
+        memcpy(picture_buf,node->buffer, y_size*3/2);
+
+        /* node->buffer was av_malloc'd, node was malloc'd by the queue */
+        av_free(node->buffer);
+        free(node);
+
+        picture->data[0] = picture_buf;  // Y (luma) plane
+        picture->data[1] = picture_buf+ y_size;  // U plane
+        picture->data[2] = picture_buf+ y_size*5/4; // V plane
+    }
+
+    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
+        /* raw video case. The API will change slightly in the near
+           future for that. */
+        AVPacket pkt;
+        av_init_packet(&pkt);
+
+        pkt.flags |= AV_PKT_FLAG_KEY;
+        pkt.stream_index = st->index;
+        pkt.data = (uint8_t *)picture;
+        pkt.size = sizeof(AVPicture);
+        ret = av_interleaved_write_frame(oc, &pkt);
+    } else {
+        /* encode the image */
+        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
+        /* if zero size, it means the image was buffered */
+        if (out_size > 0) {
+            AVPacket pkt;
+            av_init_packet(&pkt);
+
+            /* rescale encoder pts (codec time base) to the stream time base */
+            if (c->coded_frame->pts != AV_NOPTS_VALUE)
+                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
+            if(c->coded_frame->key_frame)
+                pkt.flags |= AV_PKT_FLAG_KEY;
+            pkt.stream_index = st->index;
+
+            pkt.data = video_outbuf;
+            pkt.size = out_size;
+
+            /* write the compressed frame in the media file */
+            ret = av_interleaved_write_frame(oc, &pkt);
+        } else {
+            ret = 0;
+        }
+    }
+
+    if (ret != 0) {
+        fprintf(stderr, "Error while writing video frame\n");
+        exit(1);
+    }
+
+}
+
+/* Release everything open_video allocated: the encoder, both frames
+ * (frame buffer first, then the frame itself) and the packet buffer. */
+static void close_video(AVFormatContext *oc, AVStream *st)
+{
+    avcodec_close(st->codec);
+    av_free(picture->data[0]);
+    av_free(picture);
+    if (tmp_picture) {
+        av_free(tmp_picture->data[0]);
+        av_free(tmp_picture);
+    }
+    av_free(video_outbuf);
+}
+
+/* SDL thread entry point: builds the MP4 muxer ("a.mp4"), adds an audio
+ * and a video stream, opens both encoders, then interleaves audio/video
+ * frames (whichever stream is behind gets written next) until the
+ * file-scope isStop flag is set. Finishes by writing the trailer and
+ * tearing everything down. Returns 0 on success, 1 on setup failure. */
+int encode_thread(void *arg)
+{
+    const char *filename;
+    AVOutputFormat *fmt;
+    AVFormatContext *oc;
+    AVStream *audio_st, *video_st;
+    double audio_pts, video_pts;
+    int i;
+
+    /* initialize libavcodec, and register all codecs and formats */
+    av_register_all();
+
+    /* NOTE(review): output name is hard-coded — consider making it a
+       parameter of startEncode(). */
+    filename = "a.mp4";
+
+    /* allocate the output media context */
+    avformat_alloc_output_context2(&oc, NULL, "mp4", filename);
+    if (!oc) {
+        qDebug()<<("Could not deduce output format from file extension: using MPEG.\n");
+        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
+    }
+    if (!oc) {
+        return 1;
+    }
+    fmt = oc->oformat;
+
+    /* add the audio and video streams using the default format codecs
+       and initialize the codecs */
+    video_st = NULL;
+    audio_st = NULL;
+
+    qDebug()<<fmt->video_codec;
+    if (fmt->video_codec != CODEC_ID_NONE) {
+        video_st = add_video_stream(oc, fmt->video_codec);
+    }
+    qDebug()<<"333";
+    if (fmt->audio_codec != CODEC_ID_NONE) {
+        audio_st = add_audio_stream(oc, fmt->audio_codec);
+    }
+qDebug()<<"444";
+    av_dump_format(oc, 0, filename, 1);
+
+    /* now that all the parameters are set, we can open the audio and
+       video codecs and allocate the necessary encode buffers */
+    if (video_st)
+        open_video(oc, video_st);
+    if (audio_st)
+        open_audio(oc, audio_st);
+qDebug()<<"777";
+    /* open the output file, if needed */
+    if (!(fmt->flags & AVFMT_NOFILE)) {
+        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
+            fprintf(stderr, "Could not open '%s'\n", filename);
+            return 1;
+        }
+    }
+
+    /* write the stream header, if any */
+//    av_write_header(oc);
+    avformat_write_header(oc,NULL);
+
+    picture->pts = 0;
+    while(!isStop)
+    {
+        /* compute current audio and video time (stream pts -> seconds) */
+        if (audio_st)
+            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
+        else
+            audio_pts = 0.0;
+
+        if (video_st)
+            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
+        else
+            video_pts = 0.0;
+
+        qDebug()<<audio_pts<<video_pts;
+
+//        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
+//            (!video_st || video_pts >= STREAM_DURATION))
+//            break;
+
+        /* write interleaved audio and video frames: advance whichever
+           stream currently lags behind so the file stays interleaved */
+        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
+            write_audio_frame(oc, audio_st);
+        } else {
+            write_video_frame(oc, video_st);
+        }
+    }
+
+    av_write_trailer(oc);
+
+    /* close each codec */
+    if (video_st)
+        close_video(oc, video_st);
+    if (audio_st)
+        close_audio(oc, audio_st);
+
+    /* free the streams */
+    for(i = 0; i < oc->nb_streams; i++) {
+        av_freep(&oc->streams[i]->codec);
+        av_freep(&oc->streams[i]);
+    }
+
+    if (!(fmt->flags & AVFMT_NOFILE)) {
+        /* close the output file */
+        avio_close(oc->pb);
+    }
+
+    /* free the stream */
+    /* NOTE(review): avformat_free_context(oc) would be the canonical call
+       here instead of av_free — confirm against the ffmpeg version in use. */
+    av_free(oc);
+
+    return 0;
+}
+
+/* Record the capture resolution in the file-scope WIDTH/HEIGHT globals
+ * (declared earlier in this file) before the encoder is created; called
+ * from ScreenRecorder::init with the dshow capture size. */
+void setWidth(int width,int height)
+{
+    WIDTH = width;
+    HEIGHT = height;
+}
+
+/* Spawn the muxing thread (encode_thread). Returns false if SDL could
+ * not create the thread. NOTE(review): the thread handle is discarded,
+ * so the caller cannot join it — stopEncode only signals via isStop. */
+bool startEncode()
+{
+    SDL_Thread *encodeThreadId = SDL_CreateThread(encode_thread, "parse_thread", NULL);
+
+    if (!encodeThreadId)
+    {
+        return false;
+    }
+
+    return true;
+}
+
+/* Ask the encode thread to finish: sets the file-scope isStop flag,
+ * which makes encode_thread write the trailer and clean up. Always
+ * returns true; does not wait for the thread to exit. */
+bool stopEncode()
+{
+    isStop = true;
+
+    return true;
+}

+ 36 - 0
src/savevideofile.h

@@ -0,0 +1,36 @@
+
+/**
+ * 叶海辉
+ * QQ群121376426
+ * http://blog.yundiantech.com/
+ */
+
+#ifndef SAVEVIDEOFILE_H
+#define SAVEVIDEOFILE_H
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+extern"C"
+{
+#include "libavutil/mathematics.h"
+#include "libavformat/avformat.h"
+#include "libswscale/swscale.h"
+}
+
+#include "SDL.h"
+#include "SDL_thread.h"
+#include "SDL_events.h"
+
+#include <QDebug>
+
+/* Set the encoder frame size; must be called before startEncode(). */
+void setWidth(int width,int height);
+/* Start/stop the background MP4 muxing thread (see savevideofile.cpp). */
+bool startEncode();
+bool stopEncode();
+
+/* Push one captured frame / audio chunk into the encoder queues.
+ * Ownership of 'buffer' transfers to the queue (freed after encoding). */
+void videoDataQuene_Input(uint8_t * buffer,int size);
+void audioDataQuene_Input(uint8_t * buffer,int size);
+
+#endif // SAVEVIDEOFILE_H

+ 266 - 0
src/screenrecorder.cpp

@@ -0,0 +1,266 @@
+
+/**
+ * 叶海辉
+ * QQ群121376426
+ * http://blog.yundiantech.com/
+ */
+
+#include "screenrecorder.h"
+
+#include <QDateTime>
+#include <QDebug>
+
+/* Audio encoder frame size (samples); defined in savevideofile.cpp. */
+extern int audio_input_frame_size;
+
+/* Capture thread starts idle; init() must be called before startRecord(). */
+ScreenRecorder::ScreenRecorder()
+{
+    m_isRun = false;
+}
+
+ScreenRecorder::~ScreenRecorder()
+{
+
+}
+
+/* Open the DirectShow capture devices (microphone named by audioDevName,
+ * desktop via the "screen-capture-recorder" virtual camera), locate the
+ * audio and video streams, open both decoders and allocate the decode /
+ * conversion frames. Also tells the encoder the capture resolution via
+ * setWidth(). Returns SUCCEED or the ErroCode of the first failing step. */
+ErroCode ScreenRecorder::init(QString audioDevName)
+{
+
+    AVCodec			*pCodec = NULL;
+    AVCodec			*aCodec = NULL;
+
+    av_register_all();
+    avformat_network_init();
+    avdevice_register_all();  //Register Device
+
+    pFormatCtx = avformat_alloc_context();
+
+    AVInputFormat *ifmt = av_find_input_format("dshow");
+
+    /* open the microphone first, then add the screen-capture device on
+       the same context */
+    QString audioDevOption = QString("audio=%1").arg(audioDevName);
+    if(avformat_open_input(&pFormatCtx,audioDevOption.toUtf8(),ifmt,NULL)!=0){
+        fprintf(stderr,"Couldn't open input stream audio.(无法打开输入流)\n");
+        return AudioOpenFailed;
+    }
+
+
+    if(avformat_open_input(&pFormatCtx,"video=screen-capture-recorder",ifmt,NULL)!=0){
+        fprintf(stderr,"Couldn't open input stream video.(无法打开输入流)\n");
+        return VideoOpenFailed;
+    }
+
+    /* locate the video stream */
+    videoindex=-1;
+    for(i=0; i<pFormatCtx->nb_streams; i++)
+        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
+        {
+            videoindex=i;
+            break;
+        }
+    if(videoindex==-1)
+    {
+        printf("Didn't find a video stream.(没有找到视频流)\n");
+        return VideoOpenFailed;
+    }
+
+    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
+    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
+    if(pCodec == NULL)
+    {
+        printf("video Codec not found.\n");
+        return VideoDecoderOpenFailed;
+    }
+
+    if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
+    {
+        printf("Could not open video codec.\n");
+        return VideoDecoderOpenFailed;
+    }
+
+    /* locate the audio stream */
+    audioindex = -1;
+    aCodecCtx = NULL;
+
+    for(i=0; i<pFormatCtx->nb_streams; i++)
+        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO)
+        {
+            audioindex=i;
+            break;
+        }
+    if(audioindex==-1)
+    {
+        /* BUG FIX: message previously said "video stream" */
+        printf("Didn't find an audio stream.(没有找到音频流)\n");
+        return AudioOpenFailed;
+    }
+
+    aCodecCtx = pFormatCtx->streams[audioindex]->codec;
+    aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
+    if(aCodec == NULL)
+    {
+        printf("audio Codec not found.\n");
+        return AudioDecoderOpenFailed;
+    }
+
+    if(avcodec_open2(aCodecCtx, aCodec,NULL)<0)
+    {
+        /* BUG FIX: message previously said "video codec" */
+        printf("Could not open audio codec.\n");
+        return AudioDecoderOpenFailed;
+    }
+
+
+    /* decode frames plus a YUV420P conversion target for the encoder */
+    aFrame=avcodec_alloc_frame();
+    pFrame=avcodec_alloc_frame();
+    pFrameYUV=avcodec_alloc_frame();
+    out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
+    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
+
+    setWidth(pCodecCtx->width, pCodecCtx->height);
+
+    return SUCCEED;
+}
+
+/* Release the frames/buffers allocated in init() and close both codecs.
+ * Called from the end of run() after capture stops. */
+void ScreenRecorder::deInit()
+{
+    av_free(out_buffer);
+    av_free(aFrame);
+    av_free(pFrame);
+    av_free(pFrameYUV);
+    avcodec_close(pCodecCtx);
+    if (aCodecCtx)
+        avcodec_close(aCodecCtx);
+
+///NOTE(review): the two calls below crash when enabled here; left
+///disabled for now, to be fixed in a later revision (pFormatCtx leaks).
+//    avformat_close_input(&pFormatCtx);
+//    avformat_free_context(pFormatCtx);
+}
+
+/* Kick off recording: start the encoder/muxer thread (savevideofile.cpp)
+ * and then this QThread's capture loop (run()). */
+void ScreenRecorder::startRecord()
+{
+    m_isRun = true;
+//    m_writeFile->startWrite();
+    startEncode();
+    start();
+}
+
+/* Signal both the capture loop (m_isRun) and the encoder thread
+ * (stopEncode) to finish; returns immediately without joining. */
+void ScreenRecorder::stopRecord()
+{
+    m_isRun = false;
+
+    stopEncode();
+}
+
+/* Capture loop (QThread body): read packets from the dshow context,
+ * decode video at most every 100 ms (matching the 10 fps encoder time
+ * base), convert it to YUV420P and queue it; decode audio and queue it
+ * in fixed-size chunks of audio_input_frame_size*4 bytes. Runs until
+ * stopRecord() clears m_isRun, then tears down via deInit(). */
+void ScreenRecorder::run()
+{
+
+    int ret, got_frame;
+
+    AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));
+
+    struct SwsContext *img_convert_ctx;
+    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
+    //------------------------------
+
+
+    int y_size = pCodecCtx->width * pCodecCtx->height;
+
+    int size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
+
+
+    qint64 firstTime = QDateTime::currentMSecsSinceEpoch();
+
+    while(m_isRun )
+    {
+        if (av_read_frame(pFormatCtx, packet)<0)
+        {
+            msleep(10);
+            continue;
+        }
+
+        if(packet->stream_index==videoindex)
+        {
+            /* throttle to ~10 fps: decode/queue at most one frame per 100 ms */
+            qint64 secondTime = QDateTime::currentMSecsSinceEpoch();
+
+            if ((secondTime - firstTime) >= 100)
+            {
+                firstTime = secondTime;
+                ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_frame, packet);
+                if(ret < 0)
+                {
+                    /* NOTE(review): early return leaks packet/sws ctx and
+                       skips deInit(); acceptable as a fatal path for now */
+                    printf("video Decode Error.(解码错误)\n");
+                    return;
+                }
+
+                if(got_frame)
+                {
+                    sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
+
+                    /* pack the three planes contiguously; queue takes ownership */
+                    uint8_t * picture_buf = (uint8_t *)av_malloc(size);
+                    memcpy(picture_buf,pFrameYUV->data[0],y_size);
+                    memcpy(picture_buf+y_size,pFrameYUV->data[1],y_size/4);
+                    memcpy(picture_buf+y_size+y_size/4,pFrameYUV->data[2],y_size/4);
+
+
+                    videoDataQuene_Input(picture_buf,y_size*3/2);
+                }
+            }
+        }
+        else if(packet->stream_index == audioindex)
+        {
+
+            ret = avcodec_decode_audio4(aCodecCtx, aFrame, &got_frame, packet);
+            if(ret < 0)
+            {
+                fprintf(stderr,"video Audio Error.\n");
+                return;
+            }
+
+            if (got_frame)
+            {
+
+                int size = av_samples_get_buffer_size(NULL,aCodecCtx->channels, aFrame->nb_samples,aCodecCtx->sample_fmt, 1);
+
+                int index = 0;
+
+                int ONEAudioSize = audio_input_frame_size * 4;//4096 bytes per encoder frame
+
+                /* BUG FIX: the original condition was i < size/ONEAudioSize,
+                   which made the i == size/ONEAudioSize remainder branch
+                   unreachable and silently dropped the trailing samples of
+                   every decoded frame. Iterate one extra time to flush them. */
+                for (int i=0;i<=(size/ONEAudioSize);i++)
+                {
+
+                    int framSize = ONEAudioSize;
+                    if (i==size/ONEAudioSize)
+                    {
+                        framSize = size%ONEAudioSize;
+                    }
+
+                    if (framSize<=0){
+                        break;
+                    }
+
+                    /* BUG FIX: zero the chunk so a short final copy is padded
+                       with silence instead of uninitialized heap bytes (the
+                       queue always consumes ONEAudioSize bytes). */
+                    uint8_t * audio_buf = (uint8_t *)malloc(4096*2);
+                    memset(audio_buf, 0, 4096*2);
+                    memcpy(audio_buf, aFrame->data[0]+index, framSize);
+
+                    audioDataQuene_Input((uint8_t*)audio_buf,ONEAudioSize);
+
+                    index += framSize;
+
+                }
+            }
+
+        }
+
+        av_free_packet(packet);
+    }
+
+    qDebug()<<"record stopping...";
+
+    qDebug()<<"record finished!";
+
+    sws_freeContext(img_convert_ctx);
+
+    /* BUG FIX: the AVPacket struct itself was av_malloc'd and never freed */
+    av_free(packet);
+
+    deInit();
+
+}

+ 68 - 0
src/screenrecorder.h

@@ -0,0 +1,68 @@
+
+/**
+ * 叶海辉
+ * QQ群121376426
+ * http://blog.yundiantech.com/
+ */
+
+#ifndef SCREENRECORDER_H
+#define SCREENRECORDER_H
+
+#include <QThread>
+
+extern "C"
+{
+#include "libavcodec/avcodec.h"
+#include "libavformat/avformat.h"
+#include "libswscale/swscale.h"
+#include "libavdevice/avdevice.h"
+//SDL
+#include "SDL.h"
+#include "SDL_thread.h"
+}
+
+#include "savevideofile.h"
+
+/* Result codes for ScreenRecorder::init(). */
+enum ErroCode
+{
+    AudioOpenFailed = 0,
+    VideoOpenFailed,
+    AudioDecoderOpenFailed,
+    VideoDecoderOpenFailed,
+    SUCCEED
+};
+
+/* Captures the desktop (dshow "screen-capture-recorder") and a microphone,
+ * decodes both, and feeds raw frames/samples to the encoder queues in
+ * savevideofile.h. The capture loop runs on this QThread. */
+class ScreenRecorder : public QThread
+{
+    Q_OBJECT
+
+public:
+    explicit ScreenRecorder();
+    ~ScreenRecorder();
+
+    ErroCode init(QString audioDevName);
+    void deInit();
+
+    void startRecord();
+    void stopRecord();
+
+protected:
+    void run();
+
+private:
+
+    // dshow demux context and decoder state (deprecated stream->codec API)
+    AVFormatContext	*pFormatCtx;
+    int				i, videoindex ,audioindex;  // stream indices; i is a scratch loop index
+    AVCodecContext	*pCodecCtx,*aCodecCtx;      // video / audio decoder contexts
+
+    // decoded video, decoded audio, and YUV420P conversion target
+    AVFrame	*pFrame,*aFrame,*pFrameYUV;
+    uint8_t *out_buffer;  // pixel storage backing pFrameYUV
+
+    bool m_isRun;  // capture-loop run flag, cleared by stopRecord()
+
+
+};
+
+#endif // SCREENRECORDER_H

+ 2 - 2
说明.txt

@@ -1,4 +1,4 @@
-从零开始学习音视频编程技术(十九) 录屏软件开发之YUV AAC合成MP4
+从零开始学习音视频编程技术(二十) 录屏软件开发之录屏生成MP4
 
 
 这是Qt的工程,建议使用Qt Creator 打开
@@ -12,7 +12,7 @@ FFMPEG
 
 
 关于代码的解释 请参考:
-http://blog.yundiantech.com/?log=blog&id=26
+http://blog.yundiantech.com/?log=blog&id=27
 
 
 Qt开发环境的搭建 请参考:

部分文件因为文件数量过多而无法显示