Browse Source

V1.4.0

1.音视频同步进阶,将视频解码放入新线程
叶海辉 6 years ago
parent
commit
67ed902d8e
4 changed files with 519 additions and 193 deletions
  1. 1 1
      src/mainwindow.cpp
  2. 475 181
      src/videoplayer/videoplayer.cpp
  3. 41 6
      src/videoplayer/videoplayer.h
  4. 2 5
      说明.txt

+ 1 - 1
src/mainwindow.cpp

@@ -20,7 +20,7 @@ MainWindow::MainWindow(QWidget *parent) :
     mPlayer = new VideoPlayer;
     connect(mPlayer,SIGNAL(sig_GetOneFrame(QImage)),this,SLOT(slotGetOneFrame(QImage)));
 
-    mPlayer->setFileName("E:\\in.mp4");
+    mPlayer->setFileName("E:\\in.rmvb");
     mPlayer->startPlay();
 
 }

+ 475 - 181
src/videoplayer/videoplayer.cpp

@@ -17,6 +17,10 @@ void packet_queue_init(PacketQueue *q) {
     memset(q, 0, sizeof(PacketQueue));
     q->mutex = SDL_CreateMutex();
     q->cond = SDL_CreateCond();
+    q->size = 0;
+    q->nb_packets = 0;
+    q->first_pkt = NULL;
+    q->last_pkt = NULL;
 }
 
 int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
@@ -71,99 +75,184 @@ static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
         } else {
             SDL_CondWait(q->cond, q->mutex);
         }
+
     }
+
     SDL_UnlockMutex(q->mutex);
     return ret;
 }
 
-int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size)
+static int audio_decode_frame(VideoState *is, double *pts_ptr)
 {
+    int len1, len2, decoded_data_size;
+    AVPacket *pkt = &is->audio_pkt;
+    int got_frame = 0;
+    int64_t dec_channel_layout;
+    int wanted_nb_samples, resampled_data_size, n;
 
-    static AVPacket pkt;
-    static uint8_t *audio_pkt_data = NULL;
-    static int audio_pkt_size = 0;
-    int len1, data_size;
+    double pts;
 
-    AVCodecContext *aCodecCtx = is->aCodecCtx;
-    AVFrame *audioFrame = is->audioFrame;
-    PacketQueue *audioq = is->audioq;
+    for (;;) {
 
-    for(;;)
-    {
-        if(packet_queue_get(audioq, &pkt, 1) < 0)
-        {
-            return -1;
-        }
-        audio_pkt_data = pkt.data;
-        audio_pkt_size = pkt.size;
-        while(audio_pkt_size > 0)
-        {
-            int got_picture;
+        while (is->audio_pkt_size > 0) {
 
-            int ret = avcodec_decode_audio4( aCodecCtx, audioFrame, &got_picture, &pkt);
-            if( ret < 0 ) {
-                printf("Error in decoding audio frame.\n");
-                exit(0);
-            }
+//            if (is->isPause == true)
+//            {
+//                SDL_Delay(10);
+//                continue;
+//            }
 
-            if( got_picture ) {
-                int in_samples = audioFrame->nb_samples;
-                short *sample_buffer = (short*)malloc(audioFrame->nb_samples * 2 * 2);
-                memset(sample_buffer, 0, audioFrame->nb_samples * 4);
-
-                int i=0;
-                float *inputChannel0 = (float*)(audioFrame->extended_data[0]);
-
-                // Mono
-                if( audioFrame->channels == 1 ) {
-                    for( i=0; i<in_samples; i++ ) {
-                        float sample = *inputChannel0++;
-                        if( sample < -1.0f ) {
-                            sample = -1.0f;
-                        } else if( sample > 1.0f ) {
-                            sample = 1.0f;
-                        }
-
-                        sample_buffer[i] = (int16_t)(sample * 32767.0f);
-                    }
-                } else { // Stereo
-                    float* inputChannel1 = (float*)(audioFrame->extended_data[1]);
-                    for( i=0; i<in_samples; i++) {
-                        sample_buffer[i*2] = (int16_t)((*inputChannel0++) * 32767.0f);
-                        sample_buffer[i*2+1] = (int16_t)((*inputChannel1++) * 32767.0f);
-                    }
+            if (!is->audio_frame) {
+                if (!(is->audio_frame = avcodec_alloc_frame())) {
+                    return AVERROR(ENOMEM);
                 }
-//                fwrite(sample_buffer, 2, in_samples*2, pcmOutFp);
-                memcpy(audio_buf,sample_buffer,in_samples*4);
-                free(sample_buffer);
+            } else
+                avcodec_get_frame_defaults(is->audio_frame);
+
+            len1 = avcodec_decode_audio4(is->audio_st->codec, is->audio_frame,
+                    &got_frame, pkt);
+            if (len1 < 0) {
+                // error, skip the frame
+                is->audio_pkt_size = 0;
+                break;
             }
 
-            audio_pkt_size -= ret;
+            is->audio_pkt_data += len1;
+            is->audio_pkt_size -= len1;
 
-            if (audioFrame->nb_samples <= 0)
-            {
+            if (!got_frame)
                 continue;
+
+            /* 计算解码出来的桢需要的缓冲大小 */
+            decoded_data_size = av_samples_get_buffer_size(NULL,
+                    is->audio_frame->channels, is->audio_frame->nb_samples,
+                    (AVSampleFormat)is->audio_frame->format, 1);
+
+            dec_channel_layout =
+                    (is->audio_frame->channel_layout
+                            && is->audio_frame->channels
+                                    == av_get_channel_layout_nb_channels(
+                                            is->audio_frame->channel_layout)) ?
+                            is->audio_frame->channel_layout :
+                            av_get_default_channel_layout(
+                                    is->audio_frame->channels);
+
+            wanted_nb_samples = is->audio_frame->nb_samples;
+
+            if (is->audio_frame->format != is->audio_src_fmt
+                    || dec_channel_layout != is->audio_src_channel_layout
+                    || is->audio_frame->sample_rate != is->audio_src_freq
+                    || (wanted_nb_samples != is->audio_frame->nb_samples
+                            && !is->swr_ctx)) {
+                if (is->swr_ctx)
+                    swr_free(&is->swr_ctx);
+                is->swr_ctx = swr_alloc_set_opts(NULL,
+                        is->audio_tgt_channel_layout, (AVSampleFormat)is->audio_tgt_fmt,
+                        is->audio_tgt_freq, dec_channel_layout,
+                        (AVSampleFormat)is->audio_frame->format, is->audio_frame->sample_rate,
+                        0, NULL);
+                if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
+                    //fprintf(stderr,"swr_init() failed\n");
+                    break;
+                }
+                is->audio_src_channel_layout = dec_channel_layout;
+                is->audio_src_channels = is->audio_st->codec->channels;
+                is->audio_src_freq = is->audio_st->codec->sample_rate;
+                is->audio_src_fmt = is->audio_st->codec->sample_fmt;
+            }
+
+            /* 这里我们可以对采样数进行调整,增加或者减少,一般可以用来做声画同步 */
+            if (is->swr_ctx) {
+                const uint8_t **in =
+                        (const uint8_t **) is->audio_frame->extended_data;
+                uint8_t *out[] = { is->audio_buf2 };
+                if (wanted_nb_samples != is->audio_frame->nb_samples) {
+                    if (swr_set_compensation(is->swr_ctx,
+                            (wanted_nb_samples - is->audio_frame->nb_samples)
+                                    * is->audio_tgt_freq
+                                    / is->audio_frame->sample_rate,
+                            wanted_nb_samples * is->audio_tgt_freq
+                                    / is->audio_frame->sample_rate) < 0) {
+                        //fprintf(stderr,"swr_set_compensation() failed\n");
+                        break;
+                    }
+                }
+
+                len2 = swr_convert(is->swr_ctx, out,
+                        sizeof(is->audio_buf2) / is->audio_tgt_channels
+                                / av_get_bytes_per_sample(is->audio_tgt_fmt),
+                        in, is->audio_frame->nb_samples);
+                if (len2 < 0) {
+                    //fprintf(stderr,"swr_convert() failed\n");
+                    break;
+                }
+                if (len2
+                        == sizeof(is->audio_buf2) / is->audio_tgt_channels
+                                / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
+                    //fprintf(stderr,"warning: audio buffer is probably too small\n");
+                    swr_init(is->swr_ctx);
+                }
+                is->audio_buf = is->audio_buf2;
+                resampled_data_size = len2 * is->audio_tgt_channels
+                        * av_get_bytes_per_sample(is->audio_tgt_fmt);
+            } else {
+                resampled_data_size = decoded_data_size;
+                is->audio_buf = is->audio_frame->data[0];
             }
 
-            data_size = audioFrame->nb_samples * 4;
+            pts = is->audio_clock;
+            *pts_ptr = pts;
+            n = 2 * is->audio_st->codec->channels;
+            is->audio_clock += (double) resampled_data_size
+                    / (double) (n * is->audio_st->codec->sample_rate);
 
-            return data_size;
+            // We have data, return it and come back for more later
+            return resampled_data_size;
         }
-        if(pkt.data)
-            av_free_packet(&pkt);
-   }
+
+//        if (is->isPause == true)
+//        {
+//            SDL_Delay(10);
+//            continue;
+//        }
+
+        if (pkt->data)
+            av_free_packet(pkt);
+        memset(pkt, 0, sizeof(*pkt));
+//        if (is->quit)
+//            return -1;
+        if (packet_queue_get(&is->audioq, pkt, 0) <= 0)
+            return -1;
+
+//        if(pkt->data == is->flush_pkt.data) {
+////fprintf(stderr,"avcodec_flush_buffers(is->audio...\n");
+//        avcodec_flush_buffers(is->audio_st->codec);
+////        fprintf(stderr,"avcodec_flush_buffers(is->audio 222\n");
+
+//        continue;
+
+//        }
+
+        is->audio_pkt_data = pkt->data;
+        is->audio_pkt_size = pkt->size;
+
+        /* if update, update the audio clock w/pts */
+        if (pkt->pts != AV_NOPTS_VALUE) {
+            is->audio_clock = av_q2d(is->audio_st->time_base) * pkt->pts;
+        }
+    }
+
+    return 0;
 }
 
-void audio_callback(void *userdata, Uint8 *stream, int len)
-{
-//    AVCodecContext *aCodecCtx = (AVCodecContext *) userdata;
+
+static void audio_callback(void *userdata, Uint8 *stream, int len) {
     VideoState *is = (VideoState *) userdata;
 
+//    qDebug()<<"audio_callback...";
     int len1, audio_data_size;
 
-    static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
-    static unsigned int audio_buf_size = 0;
-    static unsigned int audio_buf_index = 0;
+    double pts;
 
     /*   len是由SDL传入的SDL缓冲区的大小,如果这个缓冲未满,我们就一直往里填充数据 */
     while (len > 0) {
@@ -171,31 +260,62 @@ void audio_callback(void *userdata, Uint8 *stream, int len)
         /*   这些数据待copy到SDL缓冲区, 当audio_buf_index >= audio_buf_size的时候意味着我*/
         /*   们的缓冲为空,没有数据可供copy,这时候需要调用audio_decode_frame来解码出更
          /*   多的桢数据 */
-
-        if (audio_buf_index >= audio_buf_size) {
-            audio_data_size = audio_decode_frame(is, audio_buf,sizeof(audio_buf));
+//        qDebug()<<"audio_decode_frame....";
+        if (is->audio_buf_index >= is->audio_buf_size) {
+            audio_data_size = audio_decode_frame(is, &pts);
             /* audio_data_size < 0 标示没能解码出数据,我们默认播放静音 */
             if (audio_data_size < 0) {
                 /* silence */
-                audio_buf_size = 1024;
+                is->audio_buf_size = 1024;
                 /* 清零,静音 */
-                memset(audio_buf, 0, audio_buf_size);
+                memset(is->audio_buf, 0, is->audio_buf_size);
             } else {
-                audio_buf_size = audio_data_size;
+                is->audio_buf_size = audio_data_size;
             }
-            audio_buf_index = 0;
+            is->audio_buf_index = 0;
         }
+
+//        qDebug()<<"audio_decode_frame finished!";
         /*  查看stream可用空间,决定一次copy多少数据,剩下的下次继续copy */
-        len1 = audio_buf_size - audio_buf_index;
+        len1 = is->audio_buf_size - is->audio_buf_index;
         if (len1 > len) {
             len1 = len;
         }
 
-        memcpy(stream, (uint8_t *) audio_buf + audio_buf_index, len1);
+        memcpy(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1);
+//        SDL_MixAudio(stream, (uint8_t * )is->audio_buf + is->audio_buf_index, len1, 50);
+
+//        SDL_MixAudioFormat(stream, (uint8_t * )is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, 50);
+
+
         len -= len1;
         stream += len1;
-        audio_buf_index += len1;
+        is->audio_buf_index += len1;
+    }
+
+//    qDebug()<<"audio_callback finished";
+
+
+}
+
+static double get_audio_clock(VideoState *is)
+{
+    double pts;
+    int hw_buf_size, bytes_per_sec, n;
+
+    pts = is->audio_clock; /* maintained in the audio thread */
+    hw_buf_size = is->audio_buf_size - is->audio_buf_index;
+    bytes_per_sec = 0;
+    n = is->audio_st->codec->channels * 2;
+    if(is->audio_st)
+    {
+        bytes_per_sec = is->audio_st->codec->sample_rate * n;
     }
+    if(bytes_per_sec)
+    {
+        pts -= (double)hw_buf_size / bytes_per_sec;
+    }
+    return pts;
 }
 
 static double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {
@@ -217,6 +337,238 @@ static double synchronize_video(VideoState *is, AVFrame *src_frame, double pts)
     return pts;
 }
 
+int audio_stream_component_open(VideoState *is, int stream_index)
+{
+    AVFormatContext *ic = is->ic;
+    AVCodecContext *codecCtx;
+    AVCodec *codec;
+    SDL_AudioSpec wanted_spec, spec;
+    int64_t wanted_channel_layout = 0;
+    int wanted_nb_channels;
+    /*  SDL支持的声道数为 1, 2, 4, 6 */
+    /*  后面我们会使用这个数组来纠正不支持的声道数目 */
+    const int next_nb_channels[] = { 0, 0, 1, 6, 2, 6, 4, 6 };
+
+    if (stream_index < 0 || stream_index >= ic->nb_streams) {
+        return -1;
+    }
+
+    codecCtx = ic->streams[stream_index]->codec;
+    wanted_nb_channels = codecCtx->channels;
+    if (!wanted_channel_layout
+            || wanted_nb_channels
+                    != av_get_channel_layout_nb_channels(
+                            wanted_channel_layout)) {
+        wanted_channel_layout = av_get_default_channel_layout(
+                wanted_nb_channels);
+        wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
+    }
+
+    wanted_spec.channels = av_get_channel_layout_nb_channels(
+            wanted_channel_layout);
+    wanted_spec.freq = codecCtx->sample_rate;
+    if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
+        //fprintf(stderr,"Invalid sample rate or channel count!\n");
+        return -1;
+    }
+    wanted_spec.format = AUDIO_S16SYS; // 具体含义请查看“SDL宏定义”部分
+    wanted_spec.silence = 0;            // 0指示静音
+    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;  // 自定义SDL缓冲区大小
+    wanted_spec.callback = audio_callback;        // 音频解码的关键回调函数
+    wanted_spec.userdata = is;                    // 传给上面回调函数的外带数据
+
+//    SDL_AudioDeviceID audioID = 1;// = SDL_OpenAudioDevice("",0,&wanted_spec, &spec,1);
+//    int num = SDL_GetNumAudioDevices(0);
+//    for (int i=0;i<num;i++)
+//    {
+//        qDebug()<<SDL_GetAudioDeviceName(i,0);
+//    }
+
+//    ///  打开SDL播放设备 - 开始
+//    SDL_LockAudio();
+//    SDL_AudioSpec spec;
+//    SDL_AudioSpec wanted_spec;
+//    wanted_spec.freq = aCodecCtx->sample_rate;
+//    wanted_spec.format = AUDIO_S16SYS;
+//    wanted_spec.channels = aCodecCtx->channels;
+//    wanted_spec.silence = 0;
+//    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
+//    wanted_spec.callback = audio_callback;
+//    wanted_spec.userdata = &mVideoState;
+//    if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
+//    {
+//        fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
+//        return;
+//    }
+//    SDL_UnlockAudio();
+//    SDL_PauseAudio(0);
+//    ///  打开SDL播放设备 - 结束
+
+    /*  打开音频设备,这里使用一个while来循环尝试打开不同的声道数(由上面 */
+    /*  next_nb_channels数组指定)直到成功打开,或者全部失败 */
+//    while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
+    do {
+
+        is->audioID = SDL_OpenAudioDevice(SDL_GetAudioDeviceName(0,0),0,&wanted_spec, &spec,0);
+
+//        qDebug()<<"audioID"<<audioID;
+
+//        if (audioID >= 1) break;
+
+        fprintf(stderr,"SDL_OpenAudio (%d channels): %s\n",wanted_spec.channels, SDL_GetError());
+        qDebug()<<QString("SDL_OpenAudio (%1 channels): %2").arg(wanted_spec.channels).arg(SDL_GetError());
+        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
+        if (!wanted_spec.channels) {
+            fprintf(stderr,"No more channel combinations to try, audio open failed\n");
+//            return -1;
+            break;
+        }
+        wanted_channel_layout = av_get_default_channel_layout(
+                wanted_spec.channels);
+    }while(is->audioID == 0);
+
+    /* 检查实际使用的配置(保存在spec,由SDL_OpenAudio()填充) */
+    if (spec.format != AUDIO_S16SYS) {
+        fprintf(stderr,"SDL advised audio format %d is not supported!\n",spec.format);
+        return -1;
+    }
+
+    if (spec.channels != wanted_spec.channels) {
+        wanted_channel_layout = av_get_default_channel_layout(spec.channels);
+        if (!wanted_channel_layout) {
+            fprintf(stderr,"SDL advised channel count %d is not supported!\n",spec.channels);
+            return -1;
+        }
+    }
+
+    is->audio_hw_buf_size = spec.size;
+
+    /* 把设置好的参数保存到大结构中 */
+    is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
+    is->audio_src_freq = is->audio_tgt_freq = spec.freq;
+    is->audio_src_channel_layout = is->audio_tgt_channel_layout =
+            wanted_channel_layout;
+    is->audio_src_channels = is->audio_tgt_channels = spec.channels;
+
+    codec = avcodec_find_decoder(codecCtx->codec_id);
+    if (!codec || (avcodec_open2(codecCtx, codec, NULL) < 0)) {
+        fprintf(stderr,"Unsupported codec!\n");
+        return -1;
+    }
+    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
+    switch (codecCtx->codec_type) {
+    case AVMEDIA_TYPE_AUDIO:
+//        is->audioStream = stream_index;
+        is->audio_st = ic->streams[stream_index];
+        is->audio_buf_size = 0;
+        is->audio_buf_index = 0;
+        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
+        packet_queue_init(&is->audioq);
+//        SDL_PauseAudio(0); // 开始播放静音
+        SDL_PauseAudioDevice(is->audioID,0);
+        break;
+    default:
+        break;
+    }
+
+    return 0;
+}
+
+int video_thread(void *arg)
+{
+    VideoState *is = (VideoState *) arg;
+    AVPacket pkt1, *packet = &pkt1;
+
+    int ret, got_picture, numBytes;
+
+    double video_pts = 0; //当前视频的pts
+    double audio_pts = 0; //音频pts
+
+
+    ///解码视频相关
+    AVFrame *pFrame, *pFrameRGB;
+    uint8_t *out_buffer_rgb; //解码后的rgb数据
+    struct SwsContext *img_convert_ctx;  //用于解码后的视频格式转换
+
+    AVCodecContext *pCodecCtx = is->video_st->codec; //视频解码器
+
+    pFrame = av_frame_alloc();
+    pFrameRGB = av_frame_alloc();
+
+    ///这里我们改成了 将解码后的YUV数据转换成RGB32
+    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
+            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
+            PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
+
+    numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);
+
+    out_buffer_rgb = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
+    avpicture_fill((AVPicture *) pFrameRGB, out_buffer_rgb, PIX_FMT_RGB32,
+            pCodecCtx->width, pCodecCtx->height);
+
+    while(1)
+    {
+
+        if (packet_queue_get(&is->videoq, packet, 1) <= 0) break;//队列里面没有数据了  读取完毕了
+
+        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,packet);
+
+//        if (ret < 0) {
+//            printf("decode error.\n");
+//            return;
+//        }
+
+        if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque&& *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE)
+        {
+            video_pts = *(uint64_t *) pFrame->opaque;
+        }
+        else if (packet->dts != AV_NOPTS_VALUE)
+        {
+            video_pts = packet->dts;
+        }
+        else
+        {
+            video_pts = 0;
+        }
+
+        video_pts *= av_q2d(is->video_st->time_base);
+        video_pts = synchronize_video(is, pFrame, video_pts);
+
+        while(1)
+        {
+            audio_pts = is->audio_clock;
+            if (video_pts <= audio_pts) break;
+
+            int delayTime = (video_pts - audio_pts) * 1000;
+
+            delayTime = delayTime > 5 ? 5:delayTime;
+
+            SDL_Delay(delayTime);
+        }
+
+        if (got_picture) {
+            sws_scale(img_convert_ctx,
+                    (uint8_t const * const *) pFrame->data,
+                    pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
+                    pFrameRGB->linesize);
+
+            //把这个RGB数据 用QImage加载
+            QImage tmpImg((uchar *)out_buffer_rgb,pCodecCtx->width,pCodecCtx->height,QImage::Format_RGB32);
+            QImage image = tmpImg.copy(); //把图像复制一份 传递给界面显示
+            is->player->disPlayVideo(image); //调用激发信号的函数
+        }
+
+        av_free_packet(packet);
+
+    }
+
+    av_free(pFrame);
+    av_free(pFrameRGB);
+    av_free(out_buffer_rgb);
+
+    return 0;
+}
+
 
 VideoPlayer::VideoPlayer()
 {
@@ -228,6 +580,11 @@ VideoPlayer::~VideoPlayer()
 
 }
 
+void VideoPlayer::disPlayVideo(QImage img)
+{
+    emit sig_GetOneFrame(img);  //发送信号
+}
+
 void VideoPlayer::startPlay()
 {
     ///调用 QThread 的start函数 将会自动执行下面的run函数 run函数是一个新的线程
@@ -239,27 +596,28 @@ void VideoPlayer::run()
 {
     char *file_path = mFileName.toUtf8().data();
 
+
+    av_register_all(); //初始化FFMPEG  调用了这个才能正常使用编码器和解码器
+
+    if (SDL_Init(SDL_INIT_AUDIO)) {
+        fprintf(stderr,"Could not initialize SDL - %s. \n", SDL_GetError());
+        exit(1);
+    }
+
+
+
+    VideoState *is = &mVideoState;
+
     AVFormatContext *pFormatCtx;
     AVCodecContext *pCodecCtx;
     AVCodec *pCodec;
-    AVFrame *pFrame, *pFrameRGB;
-    AVPacket *packet;
-    uint8_t *out_buffer;
+
 
     AVCodecContext *aCodecCtx;
     AVCodec *aCodec;
 
-    static struct SwsContext *img_convert_ctx;
-
-    int audioStream ,videoStream, i, numBytes;
-    int ret, got_picture;
-
-    av_register_all(); //初始化FFMPEG  调用了这个才能正常使用编码器和解码器
+    int audioStream ,videoStream, i;
 
-    if (SDL_Init(SDL_INIT_AUDIO)) {
-        fprintf(stderr,"Could not initialize SDL - %s. \n", SDL_GetError());
-        exit(1);
-    }
 
     //Allocate an AVFormatContext.
     pFormatCtx = avformat_alloc_context();
@@ -300,6 +658,13 @@ void VideoPlayer::run()
         return;
     }
 
+    is->ic = pFormatCtx;
+
+    if (audioStream >= 0) {
+        /* 所有设置SDL音频流信息的步骤都在这个函数里完成 */
+        audio_stream_component_open(&mVideoState, audioStream);
+    }
+
     ///查找音频解码器
     aCodecCtx = pFormatCtx->streams[audioStream]->codec;
     aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
@@ -315,38 +680,7 @@ void VideoPlayer::run()
         return;
     }
 
-    //初始化音频队列
-    PacketQueue *audioq = new PacketQueue;
-    packet_queue_init(audioq);
-
-    // 分配解码过程中的使用缓存
-    AVFrame* audioFrame = avcodec_alloc_frame();
-
-    mVideoState.aCodecCtx = aCodecCtx;
-    mVideoState.audioq = audioq;
-    mVideoState.audioFrame = audioFrame;
-
-    ///  打开SDL播放设备 - 开始
-    SDL_LockAudio();
-    SDL_AudioSpec spec;
-    SDL_AudioSpec wanted_spec;
-    wanted_spec.freq = aCodecCtx->sample_rate;
-    wanted_spec.format = AUDIO_S16SYS;
-    wanted_spec.channels = aCodecCtx->channels;
-    wanted_spec.silence = 0;
-    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
-    wanted_spec.callback = audio_callback;
-    wanted_spec.userdata = &mVideoState;
-    if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
-    {
-        fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
-        return;
-    }
-    SDL_UnlockAudio();
-    SDL_PauseAudio(0);
-    ///  打开SDL播放设备 - 结束
-
-
+    is->audio_st = pFormatCtx->streams[audioStream];
 
     ///查找视频解码器
     pCodecCtx = pFormatCtx->streams[videoStream]->codec;
@@ -363,82 +697,45 @@ void VideoPlayer::run()
         return;
     }
 
-    mVideoState.video_st = pFormatCtx->streams[videoStream];
+    is->video_st = pFormatCtx->streams[videoStream];
+    packet_queue_init(&is->videoq);
 
-    pFrame = av_frame_alloc();
-    pFrameRGB = av_frame_alloc();
+    ///创建一个线程专门用来解码视频
+    is->video_tid = SDL_CreateThread(video_thread, "video_thread", &mVideoState);
 
-    ///这里我们改成了 将解码后的YUV数据转换成RGB32
-    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
-            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
-            PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
 
-    numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);
-
-    out_buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
-    avpicture_fill((AVPicture *) pFrameRGB, out_buffer, PIX_FMT_RGB32,
-            pCodecCtx->width, pCodecCtx->height);
+    is->player = this;
 
-    int y_size = pCodecCtx->width * pCodecCtx->height;
+//    int y_size = pCodecCtx->width * pCodecCtx->height;
 
-    packet = (AVPacket *) malloc(sizeof(AVPacket)); //分配一个packet
-    av_new_packet(packet, y_size); //分配packet的数据
+    AVPacket *packet = (AVPacket *) malloc(sizeof(AVPacket)); //分配一个packet 用来存放读取的视频
+//    av_new_packet(packet, y_size); //av_read_frame 会给它分配空间 因此这里不需要了
 
     av_dump_format(pFormatCtx, 0, file_path, 0); //输出视频信息
 
-    int64_t start_time = av_gettime();
-    int64_t pts = 0; //当前视频的pts
-
     while (1)
     {
-        if (av_read_frame(pFormatCtx, packet) < 0)
-        {
-            break; //这里认为视频读取完了
+        //这里做了个限制  当队列里面的数据超过某个大小的时候 就暂停读取  防止一下子就把视频读完了,导致的空间分配不足
+        /* 这里audioq.size是指队列中的所有数据包带的音频数据的总量或者视频数据总量,并不是包的数量 */
+        //这个值可以稍微写大一些
+        if (is->audioq.size > MAX_AUDIO_SIZE || is->videoq.size > MAX_VIDEO_SIZE) {
+            SDL_Delay(10);
+            continue;
         }
 
-        int64_t realTime = av_gettime() - start_time; //主时钟时间
-        while(pts > realTime)
+        if (av_read_frame(pFormatCtx, packet) < 0)
         {
-            SDL_Delay(10);
-            realTime = av_gettime() - start_time; //主时钟时间
+            break; //这里认为视频读取完了
         }
 
         if (packet->stream_index == videoStream)
         {
-            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,packet);
-            if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque&& *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE)
-            {
-                pts = *(uint64_t *) pFrame->opaque;
-            }
-            else if (packet->dts != AV_NOPTS_VALUE)
-            {
-                pts = packet->dts;
-            }
-            else
-            {
-                pts = 0;
-            }
-
-            pts *= 1000000 * av_q2d(mVideoState.video_st->time_base);
-            pts = synchronize_video(&mVideoState, pFrame, pts);
-
-            if (got_picture) {
-                sws_scale(img_convert_ctx,
-                        (uint8_t const * const *) pFrame->data,
-                        pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
-                        pFrameRGB->linesize);
-
-                //把这个RGB数据 用QImage加载
-                QImage tmpImg((uchar *)out_buffer,pCodecCtx->width,pCodecCtx->height,QImage::Format_RGB32);
-                QImage image = tmpImg.copy(); //把图像复制一份 传递给界面显示
-                emit sig_GetOneFrame(image);  //发送信号
-            }
-
-            av_free_packet(packet);
+            packet_queue_put(&is->videoq, packet);
+            //这里我们将数据存入队列 因此不调用 av_free_packet 释放
         }
         else if( packet->stream_index == audioStream )
         {
-            packet_queue_put(mVideoState.audioq, packet);
+            packet_queue_put(&is->audioq, packet);
             //这里我们将数据存入队列 因此不调用 av_free_packet 释放
         }
         else
@@ -446,11 +743,8 @@ void VideoPlayer::run()
             // Free the packet that was allocated by av_read_frame
             av_free_packet(packet);
         }
-
     }
 
-    av_free(out_buffer);
-    av_free(pFrameRGB);
     avcodec_close(pCodecCtx);
     avformat_close_input(&pFormatCtx);
 }

+ 41 - 6
src/videoplayer/videoplayer.h

@@ -39,14 +39,45 @@ typedef struct PacketQueue {
 #define VIDEO_PICTURE_QUEUE_SIZE 1
 #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
 
-typedef struct VideoState {
-    AVCodecContext *aCodecCtx; //音频解码器
-    AVFrame *audioFrame;// 解码音频过程中的使用缓存
-    PacketQueue *audioq;
+#define MAX_AUDIO_SIZE (25 * 16 * 1024)
+#define MAX_VIDEO_SIZE (25 * 256 * 1024)
+
+class VideoPlayer; //前置声明
 
+typedef struct VideoState {
+    AVFormatContext *ic;
+    AVFrame *audio_frame;// 解码音频过程中的使用缓存
+    PacketQueue audioq;
+    AVStream *audio_st; //音频流
+    unsigned int audio_buf_size;
+    unsigned int audio_buf_index;
+    AVPacket audio_pkt;
+    uint8_t *audio_pkt_data;
+    int audio_pkt_size;
+    uint8_t *audio_buf;
+    DECLARE_ALIGNED(16,uint8_t,audio_buf2) [AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
+    enum AVSampleFormat audio_src_fmt;
+    enum AVSampleFormat audio_tgt_fmt;
+    int audio_src_channels;
+    int audio_tgt_channels;
+    int64_t audio_src_channel_layout;
+    int64_t audio_tgt_channel_layout;
+    int audio_src_freq;
+    int audio_tgt_freq;
+    struct SwrContext *swr_ctx; //用于解码后的音频格式转换
+    int audio_hw_buf_size;
+
+    double audio_clock; ///音频时钟
     double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
 
     AVStream *video_st;
+    PacketQueue videoq;
+
+
+    SDL_Thread *video_tid;  //视频线程id
+    SDL_AudioDeviceID audioID;
+
+    VideoPlayer *player; //记录下这个类的指针  主要用于在线程里面调用激发信号的函数
 
 } VideoState;
 
@@ -62,8 +93,10 @@ public:
 
     void startPlay();
 
+    void disPlayVideo(QImage img);
+
 signals:
-    void sig_GetOneFrame(QImage); //每获取到一帧图像 就发送此信号
+    void sig_GetOneFrame(QImage); //获取到一帧图像 就发送此信号
 
 protected:
     void run();
@@ -71,7 +104,9 @@ protected:
 private:
     QString mFileName;
 
-    VideoState mVideoState; //用来 传递给 SDL音频回调函数的数据
+
+
+    VideoState mVideoState;
 
 };
 

+ 2 - 5
说明.txt

@@ -1,10 +1,7 @@
 这是Qt的工程,建议使用Qt Creator 打开
 
 
-是一个Windows下使用FFMPEG解码  
-并使用QPaint绘制到QWidget上
-同时使用SDL播放声音
-并加入了声画同步
+从零开始学习音视频编程技术(九) FFMPEG Qt视频播放器之同步进阶篇
 
 FFMPEG的版本是2.5.2
 SDL的版本是2.04
@@ -13,7 +10,7 @@ SDL
 
 
 关于代码的解释 请参考:
-http://blog.yundiantech.com/?log=blog&id=11
+http://blog.yundiantech.com/?log=blog&id=12
 
 
 Qt开发环境的搭建 请参考: