@@ -43,6 +43,46 @@
// return av_interleaved_write_frame(fmt_ctx, pkt);
//}

+/**
+* Add an ADTS header at the beginning of each and every AAC packet.
+* This is needed because the MediaCodec encoder generates packets of raw
+* AAC data.
+*
+* Note that packetLen must include the 7-byte ADTS header itself:
+* i.e. packetLen = raw AAC packet length + 7.
+**/
+void addADTStoPacket(uint8_t* packet, int packetLen)
+{
+ int profile = 2; //AAC LC, MediaCodecInfo.CodecProfileLevel.AACObjectLC
+ int freqIdx = 4; //44.1 kHz: the array index of 44100 in avpriv_mpeg4audio_sample_rates below (table taken from the FFmpeg sources)
+ int chanCfg = 2; //see the channel_configuration table below; 2 = stereo
+
+ /*int avpriv_mpeg4audio_sample_rates[] = {
+ 96000, 88200, 64000, 48000, 44100, 32000,
+ 24000, 22050, 16000, 12000, 11025, 8000, 7350
+ };
+ channel_configuration: channel count/arrangement (chanCfg)
+ 0: Defined in AOT Specific Config
+ 1: 1 channel: front-center
+ 2: 2 channels: front-left, front-right
+ 3: 3 channels: front-center, front-left, front-right
+ 4: 4 channels: front-center, front-left, front-right, back-center
+ 5: 5 channels: front-center, front-left, front-right, back-left, back-right
+ 6: 6 channels: front-center, front-left, front-right, back-left, back-right, LFE-channel
+ 7: 8 channels: front-center, front-left, front-right, side-left, side-right, back-left, back-right, LFE-channel
+ 8-15: Reserved
+ */
+
+ // fill in ADTS data
+ packet[0] = (uint8_t)0xFF;
+ packet[1] = (uint8_t)0xF9;
+ packet[2] = (uint8_t)(((profile-1)<<6) + (freqIdx<<2) +(chanCfg>>2));
+ packet[3] = (uint8_t)(((chanCfg&3)<<6) + (packetLen>>11));
+ packet[4] = (uint8_t)((packetLen&0x7FF) >> 3);
+ packet[5] = (uint8_t)(((packetLen&7)<<5) + 0x1F);
+ packet[6] = (uint8_t)0xFC;
+}
+
SaveVideoFileThread::SaveVideoFileThread()
{
isStop = false;

@@ -250,7 +290,7 @@ void SaveVideoFileThread::add_audio_stream(OutputStream *ost, AVFormatContext *o
AVCodec **codec,
enum AVCodecID codec_id)
{
- AVCodecContext *c;
+ AVCodecContext *aCodecCtx;
int i;

/* find the video encoder */

@@ -267,175 +307,102 @@ void SaveVideoFileThread::add_audio_stream(OutputStream *ost, AVFormatContext *o
}

ost->st->id = oc->nb_streams-1;
- c = avcodec_alloc_context3(*codec);
- if (!c) {
+
+ const AVCodec* aCodec = *codec;
+
+ aCodecCtx = avcodec_alloc_context3(aCodec);
+ if (!aCodecCtx)
+ {
fprintf(stderr, "Could not alloc an encoding context\n");
exit(1);
}
- ost->enc = c;

- c->sample_fmt = (*codec)->sample_fmts ? (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
- c->bit_rate = 64000;
- c->sample_rate = 44100;
+ ///First use the two lines below to find out which sample_fmt the AAC encoder supports;
+ /// on this build it turns out to be AV_SAMPLE_FMT_FLTP
+ const enum AVSampleFormat *p = aCodec->sample_fmts;
+ fprintf(stderr, "aac encoder sample format is: %s \n",av_get_sample_fmt_name(*p));
+
+ ost->enc = aCodecCtx;
+
+// aCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
+ aCodecCtx->sample_fmt = AV_SAMPLE_FMT_FLTP;
+ aCodecCtx->sample_rate= 44100;
+ aCodecCtx->channels = 2;
+ aCodecCtx->channel_layout=av_get_default_channel_layout(aCodecCtx->channels);
+
+// aCodecCtx->channels = av_get_channel_layout_nb_channels(aCodecCtx->channel_layout);
+// aCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
+
+// aCodecCtx->profile=FF_PROFILE_AAC_LOW; //(see an AAC format overview)
+// aCodecCtx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

- if ((*codec)->supported_samplerates) {
- c->sample_rate = (*codec)->supported_samplerates[0];
- for (i = 0; (*codec)->supported_samplerates[i]; i++) {
+// aCodecCtx->bit_rate = 64000;
+
+#if 0
+ if ((*codec)->supported_samplerates)
+ {
+ aCodecCtx->sample_rate = (*codec)->supported_samplerates[0];
+
+ for (i = 0; (*codec)->supported_samplerates[i]; i++)
+ {
if ((*codec)->supported_samplerates[i] == 44100)
- c->sample_rate = 44100;
+ aCodecCtx->sample_rate = 44100;
}
}
- c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
- c->channel_layout = AV_CH_LAYOUT_STEREO;
+
if ((*codec)->channel_layouts)
{
- c->channel_layout = (*codec)->channel_layouts[0];
+ aCodecCtx->channel_layout = (*codec)->channel_layouts[0];
for (i = 0; (*codec)->channel_layouts[i]; i++)
{
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
- c->channel_layout = AV_CH_LAYOUT_STEREO;
+ aCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
}
}
- c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
- ost->st->time_base.num = 1; // = (AVRational){ 1, c->sample_rate };
- ost->st->time_base.den = c->sample_rate;
-
-//qDebug()<<__FUNCTION__<<c<<c->codec<<c->codec_id;
-//qDebug()<<__FUNCTION__<<ost->enc<<ost->enc->codec<<ost->enc->codec_id;
-// c->codec_id = codec_id;
-// c->codec_type = AVMEDIA_TYPE_AUDIO;
-
-// /* put sample parameters */
-// c->sample_fmt = AV_SAMPLE_FMT_S16;
-// c->bit_rate = 64000;
-// c->sample_rate = 44100;
-// c->channels = 2;
-//qDebug()<<__FUNCTION__<<ost->enc<<ost->enc->codec<<ost->enc->codec_id;
-//// c->bit_rate = 9600;
-//// c->sample_rate = 11025;
-//// c->channels = 1;
-
-}
-
-static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
- uint64_t channel_layout,
- int sample_rate, int nb_samples)
-{
- AVFrame *frame = av_frame_alloc();
- int ret;
-
- if (!frame) {
- fprintf(stderr, "Error allocating an audio frame\n");
- exit(1);
- }
-
- frame->format = sample_fmt;
- frame->channel_layout = channel_layout;
- frame->sample_rate = sample_rate;
- frame->nb_samples = nb_samples;
+ aCodecCtx->channels = av_get_channel_layout_nb_channels(aCodecCtx->channel_layout);
+#endif

- if (nb_samples) {
- ret = av_frame_get_buffer(frame, 0);
- if (ret < 0) {
- fprintf(stderr, "Error allocating an audio buffer\n");
- exit(1);
- }
- }
+ ost->st->time_base.num = 1; // = (AVRational){ 1, c->sample_rate };
+ ost->st->time_base.den = aCodecCtx->sample_rate;

- return frame;
}
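Since add_audio_stream trusts the first entry of sample_fmts, it can help to see the full list the encoder advertises. A small sketch under the same assumptions as the patch (aCodec already points at the selected AAC encoder):

    // sample_fmts is terminated by AV_SAMPLE_FMT_NONE; print every format the encoder accepts.
    for (const enum AVSampleFormat *fmt = aCodec->sample_fmts;
         fmt && *fmt != AV_SAMPLE_FMT_NONE; fmt++)
    {
        fprintf(stderr, "supported sample format: %s\n", av_get_sample_fmt_name(*fmt));
    }

FFmpeg's native aac encoder only reports fltp, which is why the context is fixed to AV_SAMPLE_FMT_FLTP above.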

void SaveVideoFileThread::open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
{
- AVCodecContext *c;
- int nb_samples;
- int ret;
-
- c = ost->enc;
+ AVCodecContext *aCodecCtx = ost->enc;

/* open it */
- if (avcodec_open2(c, codec, NULL) < 0) {
+ if (avcodec_open2(aCodecCtx, codec, NULL) < 0)
+ {
qDebug("could not open codec\n");
exit(1);
}

- /* init signal generator */
- ost->t = 0;
- ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
- /* increment frequency by 110 Hz per second */
- ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
+ mONEFrameSize = av_samples_get_buffer_size(NULL, aCodecCtx->channels, aCodecCtx->frame_size, aCodecCtx->sample_fmt, 1);

-// if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
-// nb_samples = 10000;
-// else
-// nb_samples = c->frame_size;
+ ost->frame = av_frame_alloc();
+ ost->frameBuffer = (uint8_t *)av_malloc(mONEFrameSize);
+ ost->frameBufferSize = mONEFrameSize;

- if (codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
- nb_samples = 10000;
- else
- nb_samples = c->frame_size;
+ ///This is required: set the number of samples per channel carried by this frame
+ int oneChannelBufferSize = mONEFrameSize / aCodecCtx->channels; //bytes of data for a single channel
+ int nb_samplesize = oneChannelBufferSize / av_get_bytes_per_sample(aCodecCtx->sample_fmt); //number of samples per channel
+ ost->frame->nb_samples = nb_samplesize;

- ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
- c->sample_rate, nb_samples);
- ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
- c->sample_rate, nb_samples);
+ ///Either of the following two calls works
+// avcodec_fill_audio_frame(ost->frame, aCodecCtx->channels, aCodecCtx->sample_fmt,(const uint8_t*)ost->frameBuffer, mONEFrameSize, 0);
+ av_samples_fill_arrays(ost->frame->data, ost->frame->linesize, ost->frameBuffer, aCodecCtx->channels, ost->frame->nb_samples, aCodecCtx->sample_fmt, 0);

- ost->frameBuffer = (uint8_t *) av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
- ost->frameBufferSize = 0;
+ ost->tmp_frame = nullptr;

/* copy the stream parameters to the muxer */
- ret = avcodec_parameters_from_context(ost->st->codecpar, c);
- if (ret < 0) {
+ int ret = avcodec_parameters_from_context(ost->st->codecpar, aCodecCtx);
+ if (ret < 0)
+ {
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}

-
- /* create resampler context */
- ost->swr_ctx = swr_alloc();
- if (!ost->swr_ctx) {
- fprintf(stderr, "Could not allocate resampler context\n");
- exit(1);
- }
-
- /* set options */
- av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
- av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
- av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
- av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
- av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
-
- /* initialize the resampling context */
- if ((ret = swr_init(ost->swr_ctx)) < 0) {
- fprintf(stderr, "Failed to initialize the resampling context\n");
- exit(1);
- }
-
- audio_input_frame_size = nb_samples * 4;
-
-}
-
-/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
- * 'nb_channels' channels. */
-static AVFrame *get_audio_frame(OutputStream *ost)
-{
- AVFrame *frame = ost->tmp_frame;
- int j, i, v;
- int16_t *q = (int16_t*)frame->data[0];
-
- for (j = 0; j <frame->nb_samples; j++) {
- v = (int)(sin(ost->t) * 10000);
- for (i = 0; i < ost->enc->channels; i++)
- *q++ = v;
- ost->t += ost->tincr;
- ost->tincr += ost->tincr2;
- }
-
- frame->pts = ost->next_pts;
- ost->next_pts += frame->nb_samples;
-
- return frame;
}
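To make the frame-size arithmetic in open_audio concrete: with the configuration used here (2 channels, AV_SAMPLE_FMT_FLTP, and frame_size 1024 after avcodec_open2), the numbers work out as follows; a sketch with those values plugged in:

    // 1024 samples * 2 channels * 4 bytes per float sample = 8192 bytes in total
    int oneFrameSize = av_samples_get_buffer_size(NULL, 2, 1024, AV_SAMPLE_FMT_FLTP, 1); // corresponds to mONEFrameSize
    int oneChannelBufferSize = oneFrameSize / 2;                                         // 4096 bytes per channel
    int nbSamples = oneChannelBufferSize / av_get_bytes_per_sample(AV_SAMPLE_FMT_FLTP);  // 4096 / 4 = 1024

so ost->frame->nb_samples ends up equal to the encoder's frame_size, which is what a fixed-frame-size encoder such as AAC expects per avcodec_send_frame() call.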

/*

@@ -445,93 +412,78 @@ static AVFrame *get_audio_frame(OutputStream *ost)
//static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
bool SaveVideoFileThread::write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
- AVCodecContext *c;
- AVPacket pkt = { 0 }; // data and size must be 0;
- AVFrame *frame;
- int ret = 0;
- int got_packet;
- int dst_nb_samples;
+ AVCodecContext *aCodecCtx = ost->enc;

- c = ost->enc;
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
+ AVPacket *packet = &pkt;

-#if 1
+ AVFrame *aFrame;

BufferDataNode node;

if (audioDataQuene_get(node))
{
- frame = ost->frame;
- // memset(frame->data[0], 0x0, frame->nb_samples);
- memcpy(frame->data[0], node.buffer, frame->nb_samples);
- // memcpy(frame->data[0], node.buffer, node.bufferSize);
+ aFrame = ost->frame;
+
+ memcpy(ost->frameBuffer, node.buffer, node.bufferSize);
+
free(node.buffer);

- frame->pts = ost->next_pts;
- ost->next_pts += frame->nb_samples;
+ aFrame->pts = ost->next_pts;
+ ost->next_pts += aFrame->nb_samples;
}
else
{
return false;
}

-#else
- frame = get_audio_frame(ost); //generate dummy audio data automatically
-#endif
+ if (aFrame)
+ {
+ AVRational rational;
+ rational.num = 1;
+ rational.den = aCodecCtx->sample_rate;
+ aFrame->pts = av_rescale_q(ost->samples_count, rational, aCodecCtx->time_base);
+ ost->samples_count += aFrame->nb_samples;
+ }

- if (frame)
+ /* send the frame for encoding */
+ int ret = avcodec_send_frame(aCodecCtx, aFrame);
+ if (ret < 0)
{
- /* convert samples from native format to destination codec format, using the resampler */
- /* compute destination number of samples */
- dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
- c->sample_rate, c->sample_rate, AV_ROUND_UP);
- av_assert0(dst_nb_samples == frame->nb_samples);
-
-// /* when we pass a frame to the encoder, it may keep a reference to it
-// * internally;
-// * make sure we do not overwrite it here
-// */
-// ret = av_frame_make_writable(ost->frame);
-// if (ret < 0)
-// exit(1);
+ fprintf(stderr, "Error sending the frame to the audio encoder\n");
+ return false;
+ }

- /* convert to destination format */
- ret = swr_convert(ost->swr_ctx,
- ost->frame->data, dst_nb_samples,
- (const uint8_t **)frame->data, frame->nb_samples);
- if (ret < 0)
+ /* read all the available output packets (in general there may be any
+ * number of them) */
+ while (ret >= 0)
+ {
+ ret = avcodec_receive_packet(aCodecCtx, packet);
+
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF || ret < 0)
{
char errstr[AV_ERROR_MAX_STRING_SIZE] = {0};
av_make_error_string(errstr, AV_ERROR_MAX_STRING_SIZE, ret);
- QString logStr = QString("!!!!!!!!!! Error while converting: %1 ret=%2")
+ QString logStr = QString("!!!!!!!!!! Error encoding audio frame: %1 ret=%2")
.arg(QString(errstr))
.arg(ret);
AppConfig::WriteLog(logStr);
+ return false;
}
- frame = ost->frame;
-
- AVRational rational;
- rational.num = 1;
- rational.den = c->sample_rate;
- frame->pts = av_rescale_q(ost->samples_count, rational, c->time_base);
- ost->samples_count += dst_nb_samples;
- }
-
- ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);

- if (ret < 0)
- {
- char errstr[AV_ERROR_MAX_STRING_SIZE] = {0};
- av_make_error_string(errstr, AV_ERROR_MAX_STRING_SIZE, ret);
- QString logStr = QString("!!!!!!!!!! Error encoding audio frame: %1 ret=%2")
- .arg(QString(errstr))
- .arg(ret);
- AppConfig::WriteLog(logStr);
- }
+#if 0 ///dump the encoded packets to a raw .aac file for debugging
+ uint8_t * aac_buf = (uint8_t *)malloc(packet->size+7);
+ addADTStoPacket(aac_buf, 7+packet->size);
+ memcpy(aac_buf+7, packet->data, packet->size);
+ static FILE *aacFp = fopen("out22.aac", "wb");
+ fwrite(aac_buf,1,packet->size+7,aacFp);
+#endif

- if (got_packet)
- {
+ ////
/* rescale output packet timestamp values from codec to stream timebase */
- av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
+ av_packet_rescale_ts(&pkt, aCodecCtx->time_base, ost->st->time_base);
pkt.stream_index = ost->st->index;

// audio_pts = pkt.pts;

@@ -552,19 +504,11 @@ bool SaveVideoFileThread::write_audio_frame(AVFormatContext *oc, OutputStream *o
AppConfig::WriteLog(logStr);
}

- av_packet_unref(&pkt);
+ av_packet_unref(packet);
+ break;
}

-//qDebug()<<__FUNCTION__<<"1111 99999";
-
- if (ret < 0)
- {
- return false;
- }
- else
- {
- return (frame || got_packet) ? 0 : 1;
- }
+ return true;

}
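One case the send/receive loop above does not handle is draining: when recording stops, the AAC encoder may still hold a few buffered frames. A minimal flush sketch using the same names as the patch (flush_audio_encoder is a hypothetical helper, not part of this commit; it would be called once after the last queued audio frame has been written):

    static void flush_audio_encoder(AVFormatContext *oc, OutputStream *ost)
    {
        // Passing NULL puts the encoder into draining mode.
        avcodec_send_frame(ost->enc, NULL);

        AVPacket pkt;
        av_init_packet(&pkt);

        // Pull out whatever packets are still buffered; receive returns AVERROR_EOF when done.
        while (avcodec_receive_packet(ost->enc, &pkt) == 0)
        {
            av_packet_rescale_ts(&pkt, ost->enc->time_base, ost->st->time_base);
            pkt.stream_index = ost->st->index;
            av_interleaved_write_frame(oc, &pkt);
            av_packet_unref(&pkt);
        }
    }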

@@ -572,9 +516,9 @@ void SaveVideoFileThread::close_audio(AVFormatContext *oc, OutputStream *ost)
{
avcodec_free_context(&ost->enc);
av_frame_free(&ost->frame);
+
+ if (ost->tmp_frame != nullptr)
av_frame_free(&ost->tmp_frame);
- sws_freeContext(ost->sws_ctx);
- swr_free(&ost->swr_ctx);

if (ost->frameBuffer != NULL)
{

@@ -854,7 +798,7 @@ bool SaveVideoFileThread::write_video_frame(AVFormatContext *oc, OutputStream *o

memcpy(ost->frameBuffer, node->buffer, node->bufferSize);

-//不止为何下面这两种方式都不行
+//Not sure why, but neither of the two approaches below works
// int y_size = c->width * c->height;
// memcpy(ost->frame->data[0], node->buffer, y_size * 3 / 2);

@@ -888,7 +832,6 @@ bool SaveVideoFileThread::write_video_frame(AVFormatContext *oc, OutputStream *o
///Convert the pts to milliseconds; this value is used only for display and does not change the pts written to the file
video_pts = av_rescale_q(pkt.pts, ost->st->time_base, {1, 1000});
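A quick numeric check of that conversion (values chosen purely for illustration): with a stream time_base of {1, 90000} and a pkt.pts of 180000,

    int64_t ms = av_rescale_q(180000, {1, 90000}, {1, 1000}); // 180000 * 1000 / 90000 = 2000 ms

i.e. two seconds of presentation time.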

-
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
if (ret < 0)

@@ -912,10 +855,9 @@ void SaveVideoFileThread::close_video(AVFormatContext *oc, OutputStream *ost)
{
avcodec_free_context(&ost->enc);
av_frame_free(&ost->frame);
+
if (ost->tmp_frame != NULL)
av_frame_free(&ost->tmp_frame);
- sws_freeContext(ost->sws_ctx);
- swr_free(&ost->swr_ctx);

if (ost->frameBuffer != NULL)
{

@@ -1062,7 +1004,7 @@ while(1)

while(1)
{
-// qDebug()<<video_st.next_pts<<audio_st.next_pts<<video_pts<<audio_pts;
+// qDebug()<<__FUNCTION__<<video_st.next_pts<<audio_st.next_pts<<video_pts<<audio_pts;

/* select the stream to encode */
if (!have_audio || (av_compare_ts(video_st.next_pts, video_st.enc->time_base, audio_st.next_pts, audio_st.enc->time_base) <= 0))