为什么FFmpeg屏幕录制的视频速度快?
我参考这个问题用FFmpeg库创建了一个屏幕录制器。但问题是录制的视频太快了。如果我录制屏幕20秒,我会得到不到10秒的视频。视频内容完整,但播放速度太快,只需6到10秒就播放完毕。 我尝试将数据包和帧的PTS值更改为以毫秒为单位的已用时间。
outPacket->pts=timer->ElapsedTimeM()*90 and outFrame->pts=timer->ElapsedTimeM()*90
现在我得到的视频长度大致正确(19秒)。但这段视频的帧速率只有15到16fps。但我预期的帧速率是30fps。
我通过计算while (av_read_frame(ifmt_ctx, av_pkt) >= 0)
循环运行的次数来计算帧,我得到了40次(对于20秒的视频)。我认为并不是所有的帧都传递给编码器,因为对于30帧/秒的20秒视频,它应该至少包含(30*20)600帧。
所以我的问题是,我如何制作一段30fps的全长视频,并且可以正常速度播放?
我的更新代码如下
#define __STDC_CONSTANT_MACROS
#include<iostream>
#include <chrono>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
using namespace std;
// Simple wall-clock stopwatch built on std::chrono::system_clock.
// Call StartTimer() once, then query the elapsed time in seconds or
// milliseconds. (Fix: the original returned MSVC-only `__int64`; on MSVC
// `long long` is the identical type, and it is portable to other compilers.)
class Timer
{
// Reference point captured by StartTimer(); elapsed queries measure from here.
chrono::time_point<chrono::system_clock> start;
public:
Timer() {}
// Record "now" as the reference point for subsequent elapsed-time queries.
void StartTimer()
{
start = std::chrono::system_clock::now();
}
// Whole seconds elapsed since StartTimer() (truncated, not rounded).
long long ElapsedTime()
{
return std::chrono::duration_cast<std::chrono::seconds>(std::chrono::system_clock::now() - start).count();
}
// Whole milliseconds elapsed since StartTimer() (truncated, not rounded).
long long ElapsedTimeM()
{
return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - start).count();
}
};
// Question code: screen recorder using FFmpeg's gdigrab device.
// Pipeline: capture Windows desktop (800x600 @ 30 fps requested) -> decode
// captured packets -> sws_scale color conversion -> H.264 encode -> mux into
// "new_out.mp4". Capture stops after ~20 seconds of wall-clock time.
// NOTE(review): several string literals below contain raw line breaks (e.g.
// `cout << "` with the message on the next line) — this looks like copy/paste
// mangling from the original post and will not compile as-is.
int main(int argc, char** argv)
{
const char* out_filename = "new_out.mp4";
avdevice_register_all(); // registers gdigrab and the other capture devices
AVOutputFormat* ofmt = NULL;
AVInputFormat* ifmt = NULL;
AVFormatContext* ifmt_ctx = avformat_alloc_context();
AVFormatContext* ofmt_ctx = avformat_alloc_context();
AVCodecParameters* av_codec_par_in = avcodec_parameters_alloc();
AVCodecParameters* av_codec_par_out = avcodec_parameters_alloc();
AVCodecContext* avcodec_contx = NULL;
AVCodec* av_codec;
AVStream* video_stream = NULL;
// Desired parameters of the OUTPUT stream. format 0 is AV_PIX_FMT_YUV420P
// in the pixel-format enum (presumably intentional — TODO confirm).
av_codec_par_out->height = 600;
av_codec_par_out->width = 800;
av_codec_par_out->bit_rate = 40000;
av_codec_par_out->codec_id = AV_CODEC_ID_H264; //AV_CODEC_ID_MPEG4; //Try H.264 instead of MPEG4
av_codec_par_out->codec_type = AVMEDIA_TYPE_VIDEO;
av_codec_par_out->format = 0;
av_codec_par_out->sample_aspect_ratio.den = 4;
av_codec_par_out->sample_aspect_ratio.num = 6;
// Options handed to the gdigrab input device when opening "desktop".
// NOTE(review): "preset" is an encoder (x264) option — passing it to the
// grabber presumably has no effect; verify against the gdigrab docs.
AVDictionary* options = NULL;
av_dict_set(&options, "framerate", "30", 0);
/* av_dict_set(&options, "offset_x", "20", 0);
av_dict_set(&options, "offset_y", "40", 0);*/
av_dict_set(&options, "video_size", "800x600", 0);
av_dict_set(&options, "probesize", "42M", 0);
av_dict_set(&options, "rtbufsize", "100M", 0);
av_dict_set(&options, "preset", "ultrafast", 0);
//int ret, i;
// Open the screen-capture input device.
ifmt = av_find_input_format("gdigrab");
if (avformat_open_input(&ifmt_ctx, "desktop", ifmt, &options) < 0)
{
cout << "Error in opening file";
exit(1);
}
int VideoStreamIndx = -1;
avformat_find_stream_info(ifmt_ctx, NULL);
/* find the first video stream index . Also there is an API available to do the below operations */
for (int i = 0; i < (int)ifmt_ctx->nb_streams; i++) // find video stream position/index.
{
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
VideoStreamIndx = i;
break;
}
}
if (VideoStreamIndx == -1)
{
cout << "
unable to find the video stream index. (-1)";
exit(1);
}
// NOTE(review): this overwrites the pointer returned by
// avcodec_parameters_alloc() above — that earlier allocation is leaked.
av_codec_par_in = ifmt_ctx->streams[VideoStreamIndx]->codecpar;
av_codec = avcodec_find_decoder(av_codec_par_in->codec_id);
if (av_codec == NULL)
{
cout << "
unable to find the decoder";
exit(1);
}
// Set up the DECODER context from the input stream parameters.
avcodec_contx = avcodec_alloc_context3(av_codec);
//Consider using preset and crf
// NOTE(review): "preset" is an x264 encoder option; setting it on the
// decoder context presumably does nothing — TODO confirm.
av_opt_set(avcodec_contx->priv_data, "preset", "ultrafast", 0);
//av_opt_set(avcodec_contx->priv_data, "crf", "18", 0);
if (avcodec_parameters_to_context(avcodec_contx, av_codec_par_in) < 0)
{
cout << "
error in converting the codec contexts";
exit(1);
}
//av_dict_set
int value = avcodec_open2(avcodec_contx, av_codec, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
cout << "
unable to open the av codec";
exit(1);
}
value = 0;
// Set up the output (mp4) muxer context, guessed from the file extension.
ofmt = av_guess_format(NULL, out_filename, NULL);
if (!ofmt)
{
cout << "
error in guessing the video format. try with correct format";
exit(1);
}
avformat_alloc_output_context2(&ofmt_ctx, ofmt, NULL, out_filename);
if (!ofmt_ctx)
{
cout << "
error in allocating av format output context";
exit(1);
}
// Find and configure the H.264 ENCODER for the output stream.
AVCodec* av_codec_out = avcodec_find_encoder(av_codec_par_out->codec_id);
if (av_codec_out == NULL)
{
cout << "
unable to find the encoder";
exit(1);
}
video_stream = avformat_new_stream(ofmt_ctx, av_codec_out);
if (!video_stream)
{
cout << "
error in creating a av format new stream";
exit(1);
}
AVCodecContext* av_cntx_out;
av_cntx_out = avcodec_alloc_context3(av_codec_out);
if (!av_cntx_out)
{
cout << "
error in allocating the codec contexts";
exit(1);
}
if (avcodec_parameters_copy(video_stream->codecpar, av_codec_par_out) < 0)
{
cout << "
Codec parameter canot copied";
exit(1);
}
if (avcodec_parameters_to_context(av_cntx_out, av_codec_par_out) < 0)
{
cout << "
error in converting the codec contexts";
exit(1);
}
//av_cntx_out->pix_fmt = AV_PIX_FMT_YUV420P;
// Encoder timing: 1/30 time base, a keyframe every 30 frames, up to 2 B-frames.
av_cntx_out->gop_size = 30;//3; //Use I-Frame frame every second.
av_cntx_out->max_b_frames = 2;
av_cntx_out->time_base.num = 1;
av_cntx_out->time_base.den = 30;
// av_cntx_out->ticks_per_frame = 10;
value = avcodec_open2(av_cntx_out, av_codec_out, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
cout << "
unable to open the av codec";
exit(1);
}
if (avcodec_contx->codec_id == AV_CODEC_ID_H264)
{
av_opt_set(av_cntx_out->priv_data, "preset", "ultrafast", 0);
}
// NOTE(review): avio_open's return value is not checked, and the
// GLOBAL_HEADER flag is set AFTER avcodec_open2 above — by then the encoder
// has already been initialized, so the flag presumably comes too late.
avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_READ_WRITE);
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
av_cntx_out->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
if (avformat_write_header(ofmt_ctx, NULL) < 0)
{
cout << "
error in writing the header context";
exit(1);
}
// NOTE(review): av_packet_alloc() already returns an initialized packet;
// memset-ing it to zero afterwards clobbers its fields.
AVPacket* av_pkt = av_packet_alloc();
//av_init_packet(av_pkt); //error C4996: 'av_init_packet': was declared deprecated
memset(av_pkt, 0, sizeof(AVPacket)); //???
// av_frame holds the decoded (captured) picture, outFrame the converted
// picture that is fed to the encoder.
AVFrame* av_frame = av_frame_alloc();
if (!av_frame)
{
cout << "
unable to release the avframe resources";
exit(1);
}
AVFrame* outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
if (!outFrame)
{
cout << "
unable to release the avframe resources for outframe";
exit(1);
}
av_frame->width = avcodec_contx->width;
av_frame->height = avcodec_contx->height;
av_frame->format = av_codec_par_in->format;
outFrame->width = av_cntx_out->width;
outFrame->height = av_cntx_out->height;
outFrame->format = av_codec_par_out->format;
av_frame_get_buffer(av_frame, 0);
av_frame_get_buffer(outFrame, 0);
// NOTE(review): the context from sws_alloc_context()/sws_init_context() is
// immediately overwritten by sws_getContext() below, leaking the first one.
SwsContext* swsCtx = sws_alloc_context();
if (sws_init_context(swsCtx, NULL, NULL) < 0)
{
cout << "
Unable to Initialize the swscaler context sws_context.";
exit(1);
}
swsCtx = sws_getContext(avcodec_contx->width, avcodec_contx->height, avcodec_contx->pix_fmt,
av_cntx_out->width, av_cntx_out->height, av_cntx_out->pix_fmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (swsCtx == NULL)
{
cout << "
Cannot allocate SWC Context";
exit(1);
}
int ii = 0;
int enc_packet_counter = 0; //Count encoded frames.
int no_frames = 100;
/* cout << "
enter No. of frames to capture : ";
cin >> no_frames;*/
//int flag;
int frameFinished;
//int got_picture;
int frame_index = 0;
AVPacket* outPacket = av_packet_alloc();
Timer* timer = new Timer();
timer->StartTimer();
int j = 0;
// Main capture loop: read a captured packet, decode it, convert, encode and
// mux; stop after 20000 ms of wall-clock time.
while (av_read_frame(ifmt_ctx, av_pkt) >= 0)
{
/* if (ii++ == no_frames)
break;*/
int iElapsedtime = timer->ElapsedTimeM();
// NOTE(review): `pts` is computed but never used — the packet/frame
// timestamps below come from enc_packet_counter instead.
int pts = iElapsedtime * 90;
if (iElapsedtime >20000)
break;
/*cout << ii++ <<"
";*/
int ret = avcodec_send_packet(avcodec_contx, av_pkt);
if (ret < 0)
{
printf("Error while sending packet");
}
frameFinished = true;
int response = 0;
// NOTE(review): AVERROR(EAGAIN) from the decoder (no frame ready yet) is
// treated as a hard error here and the frame is dropped.
response = avcodec_receive_frame(avcodec_contx, av_frame);
if (response < 0) //&& (response != AVERROR(EAGAIN)) && (response != AVERROR_EOF))
{
printf("Error while receiving frame from decoder");
frameFinished = false;
}
if (frameFinished)// Frame successfully decoded :)
{
//av_init_packet(outPacket); //error C4996: 'av_init_packet': was declared deprecated
memset(outPacket, 0, sizeof(AVPacket)); //???
outPacket->data = NULL; // packet data will be allocated by the encoder
outPacket->size = 0;
// Timestamps are derived from the encoded-frame COUNTER (one tick per
// frame in the 1/30 encoder time base), rescaled to the stream time base.
outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
//if (outPacket->dts != AV_NOPTS_VALUE)
// outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->duration =av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
outFrame->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
// NOTE(review): pkt_duration is set to the rescaled COUNTER (grows every
// frame); a per-frame duration of av_rescale_q(1, ...) was probably meant.
outFrame->pkt_duration = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
enc_packet_counter++;
//Apply color space conversion from BGRA to YUV420p using sws_scale
////////////////////////////////////////////////////////////////
int sts = sws_scale(swsCtx, //struct SwsContext *c,
av_frame->data, //const uint8_t *const srcSlice[],
av_frame->linesize, //const int srcStride[],
0, //int srcSliceY,
av_frame->height, //int srcSliceH,
outFrame->data, //uint8_t *const dst[],
outFrame->linesize); //const int dstStride[]);
if (sts < 0)
{
printf("Error while executing sws_scale");
}
////////////////////////////////////////////////////////////////
// Encode: send_frame sits at the BOTTOM of this do/while; the loop
// repeats while send_frame returns non-zero (EAGAIN drains pending
// packets via receive_packet and writes them to the muxer).
int ret = 0;
//int i = 0;
do
{
//cout << i++ << "
";
if (ret == AVERROR(EAGAIN))
{
av_packet_unref(outPacket);
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (ret) break; // deal with error
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
av_write_frame(ofmt_ctx, outPacket);
}
else if (ret != 0)
{
char str2[] = "";
cout << "
Error :" << av_make_error_string(str2, sizeof(str2), ret);
return -1;
}
ret = avcodec_send_frame(av_cntx_out, outFrame);
} while (ret);
} // frameFinished
av_packet_unref(av_pkt);
av_packet_unref(outPacket);
}
// flush the rest of the packets ???
// Drain the encoder: sending NULL enters flush mode; receive until EOF.
////////////////////////////////////////////////////////////
int ret = 0;
//int i = 0;
avcodec_send_frame(av_cntx_out, NULL);
do
{
av_packet_unref(outPacket);
//cout << i++ << "
";
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (!ret)
{
// NOTE(review): overwriting the encoder-provided timestamps during flush
// is suspicious — the answer code below removes exactly these lines.
outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); //???
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base); //???
av_write_frame(ofmt_ctx, outPacket);
enc_packet_counter++;
}
} while (!ret);
////////////////////////////////////////////////////////////
value = av_write_trailer(ofmt_ctx);
if (value < 0)
{
cout << "
error in writing av trailer";
exit(1);
}
//THIS WAS ADDED LATER
/*av_free(video_outbuf);*/
// Teardown. NOTE(review): avformat_close_input() sets ifmt_ctx to NULL on
// success, so the "file closed successfully" branch is the expected path and
// the subsequent avformat_free_context(NULL) is a no-op.
avformat_close_input(&ifmt_ctx);
if (!ifmt_ctx)
{
cout << "
file closed successfully";
}
else
{
cout << "
unable to close the file";
exit(1);
}
avformat_free_context(ifmt_ctx);
if (!ifmt_ctx)
{
cout << "
avformat free successfully";
}
else
{
cout << "
unable to free avformat context";
exit(1);
}
//Free codec context.
////////////////////////////////////////////////////////////
avcodec_free_context(&av_cntx_out);
if (!av_cntx_out)
{
cout << "
avcodec free successfully";
}
else
{
cout << "
unable to free avcodec context";
exit(1);
}
////////////////////////////////////////////////////////////
return 0;
}
解决方案
您可以测试我的代码,我无法重现您的问题。
确保您使用的不是调试版本的libav(未经优化构建的版本)。
以下是我用于测试的代码:
#define __STDC_CONSTANT_MACROS
#include <iostream>
#include <chrono>
#include <string>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
using namespace std;
// Build a decoder context for the given stream parameters: look up the
// matching decoder, allocate a context for it, and copy the parameters in.
// Caller owns the returned context.
AVCodecContext* GetCodecContextFromPar(AVCodecParameters* par)
{
AVCodecContext* ctx = avcodec_alloc_context3(avcodec_find_decoder(par->codec_id));
avcodec_parameters_to_context(ctx, par);
return ctx;
}
// Compatibility shim emulating the old avcodec_decode_video2() on top of the
// send/receive API: submit one packet, drain all decoded frames (the last one
// stays in `frame`), and report via *got_picture_ptr whether any frame was
// produced. Returns -1 if the packet could not be sent, 0 otherwise.
// Fixes vs. the original: `*got_picture_ptr = 0;` was dead code placed AFTER
// `return -1;`, and *got_picture_ptr was set to 1 even when no frame had
// actually been received.
int AvCodecDecodeVideo2(AVCodecContext* avctx, AVFrame* frame, int* got_picture_ptr, const AVPacket* avpkt)
{
*got_picture_ptr = 0; // pessimistic default: no picture yet
int ret = avcodec_send_packet(avctx, avpkt);
if (ret < 0)
{
return -1;
}
// Drain the decoder; the loop ends when it returns EAGAIN/EOF/error.
while (ret >= 0)
{
ret = avcodec_receive_frame(avctx, frame);
if (ret >= 0)
{
*got_picture_ptr = 1; // at least one frame was decoded into `frame`
}
}
return 0;
}
// Answer code: the same gdigrab -> decode -> sws_scale -> H.264 -> mp4
// pipeline as the question, but it stops after `no_frames` captured frames,
// derives timestamps purely from the encoded-frame counter, prints the
// wall-clock interval between consecutive captures (to verify ~33 ms at
// 30 fps), and does NOT override timestamps when flushing the encoder.
// NOTE(review): as in the question code, many string literals below contain
// raw line breaks (copy/paste mangling) and will not compile as-is.
int main(int argc, char** argv)
{
int framerate = 30;
int width = 800;
int height = 600;
int no_frames = 100;
//const char* out_filename = "D:\myfolder\to\output\new_out.mp4";
const char* out_filename = "new_out.mp4";
avdevice_register_all(); // registers gdigrab and the other capture devices
AVOutputFormat* ofmt = NULL;
AVInputFormat* ifmt = NULL;
AVFormatContext* ifmt_ctx = avformat_alloc_context();
AVFormatContext* ofmt_ctx = avformat_alloc_context();
AVCodecParameters* av_codec_par_in = avcodec_parameters_alloc();
AVCodecParameters* av_codec_par_out = avcodec_parameters_alloc();
AVCodecContext* avcodec_contx = NULL;
AVCodec* av_codec;
AVStream* video_stream = NULL;
// Desired OUTPUT stream parameters (format 0 is AV_PIX_FMT_YUV420P in the
// pixel-format enum — matches the "using SAR=4/3" libx264 log below).
av_codec_par_out->width = width;
av_codec_par_out->height = height;
av_codec_par_out->bit_rate = 40000;
av_codec_par_out->codec_id = AV_CODEC_ID_H264; //AV_CODEC_ID_MPEG4; //Try H.264 instead of MPEG4
av_codec_par_out->codec_type = AVMEDIA_TYPE_VIDEO;
av_codec_par_out->format = 0;
av_codec_par_out->sample_aspect_ratio.den = 3;
av_codec_par_out->sample_aspect_ratio.num = 4;
// gdigrab options: capture an 800x600 region at (20,40), 30 fps, with a
// large realtime buffer.
AVDictionary* options = NULL;
//Try adding "-rtbufsize 100M" as in https://stackoverflow.com/questions/6766333/capture-windows-screen-with-ffmpeg
av_dict_set(&options, "rtbufsize", "100M", 0);
av_dict_set(&options, "framerate", std::to_string(framerate).c_str(), 0);
av_dict_set(&options, "offset_x", "20", 0);
av_dict_set(&options, "offset_y", "40", 0);
av_dict_set(&options, "video_size", (std::to_string(width)+"x"+std::to_string(height)).c_str(), 0); //av_dict_set(&options, "video_size", "640x480", 0);
//int ret, i;
ifmt = av_find_input_format("gdigrab");
if (avformat_open_input(&ifmt_ctx, "desktop", ifmt, &options) < 0)
{
cout << "Error in opening file";
exit(1);
}
int VideoStreamIndx = -1;
avformat_find_stream_info(ifmt_ctx, NULL);
/* find the first video stream index . Also there is an API available to do the below operations */
for (int i = 0; i < (int)ifmt_ctx->nb_streams; i++) // find video stream position/index.
{
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
{
VideoStreamIndx = i;
break;
}
}
if (VideoStreamIndx == -1)
{
cout << "
unable to find the video stream index. (-1)";
exit(1);
}
// NOTE(review): overwrites the pointer from avcodec_parameters_alloc()
// above — the earlier allocation is leaked.
av_codec_par_in = ifmt_ctx->streams[VideoStreamIndx]->codecpar;
av_codec = avcodec_find_decoder(av_codec_par_in->codec_id);
if (av_codec == NULL)
{
cout << "
unable to find the decoder";
exit(1);
}
// Set up the DECODER context from the input stream parameters.
avcodec_contx = avcodec_alloc_context3(av_codec);
//Consider using preset and crf
//av_opt_set(avcodec_contx->priv_data, "preset", "fast", 0);
//av_opt_set(avcodec_contx->priv_data, "crf", "18", 0);
if (avcodec_parameters_to_context(avcodec_contx, av_codec_par_in) < 0)
{
cout << "
error in converting the codec contexts";
exit(1);
}
// NOTE(review): the input was already opened above, so adding "r" to
// `options` here presumably has no effect — see the author's own doubt.
av_dict_set(&options, "r", std::to_string(framerate).c_str(), 0); //Do we have to set the framerate?
//av_dict_set
int value = avcodec_open2(avcodec_contx, av_codec, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
cout << "
unable to open the av codec";
exit(1);
}
value = 0;
// Set up the output (mp4) muxer context, guessed from the file extension.
ofmt = av_guess_format(NULL, out_filename, NULL);
if (!ofmt)
{
cout << "
error in guessing the video format. try with correct format";
exit(1);
}
avformat_alloc_output_context2(&ofmt_ctx, ofmt, NULL, out_filename);
if (!ofmt_ctx)
{
cout << "
error in allocating av format output context";
exit(1);
}
// Find and configure the H.264 ENCODER and its output stream.
AVCodec* av_codec_out = avcodec_find_encoder(av_codec_par_out->codec_id);
if (av_codec_out == NULL)
{
cout << "
unable to find the encoder";
exit(1);
}
video_stream = avformat_new_stream(ofmt_ctx, av_codec_out);
if (!video_stream)
{
cout << "
error in creating a av format new stream";
exit(1);
}
AVCodecContext* av_cntx_out;
av_cntx_out = avcodec_alloc_context3(av_codec_out);
if (!av_cntx_out)
{
cout << "
error in allocating the codec contexts";
exit(1);
}
if (avcodec_parameters_copy(video_stream->codecpar, av_codec_par_out) < 0)
{
cout << "
Codec parameter canot copied";
exit(1);
}
if (avcodec_parameters_to_context(av_cntx_out, av_codec_par_out) < 0)
{
cout << "
error in converting the codec contexts";
exit(1);
}
// Encoder timing: 1/framerate time base, keyframe every 30 frames, <=2 B-frames.
av_cntx_out->gop_size = 30;//3; //Use I-Frame frame every 30 frames.
av_cntx_out->max_b_frames = 2;
av_cntx_out->time_base.num = 1;
av_cntx_out->time_base.den = framerate;
// NOTE(review): avio_open's return value is unchecked, and
// avformat_write_header is called BEFORE avcodec_open2 / the GLOBAL_HEADER
// flag below — the ordering is suspicious; verify against the muxing docs.
avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_READ_WRITE);
if (avformat_write_header(ofmt_ctx, NULL) < 0)
{
cout << "
error in writing the header context";
exit(1);
}
value = avcodec_open2(av_cntx_out, av_codec_out, NULL);//Initialize the AVCodecContext to use the given AVCodec.
if (value < 0)
{
cout << "
unable to open the av codec";
exit(1);
}
if (avcodec_contx->codec_id == AV_CODEC_ID_H264)
{
//av_opt_set(av_cntx_out->priv_data, "preset", "slow", 0); //"slow" may be a problem...
av_opt_set(av_cntx_out->priv_data, "preset", "fast", 0);
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
av_cntx_out->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
// NOTE(review): av_packet_alloc() already returns an initialized packet;
// the memset clobbers its fields.
AVPacket* av_pkt = av_packet_alloc();
//av_init_packet(av_pkt); //error C4996: 'av_init_packet': was declared deprecated
memset(av_pkt, 0, sizeof(AVPacket)); //???
// av_frame holds the decoded capture, outFrame the converted picture fed to
// the encoder.
AVFrame* av_frame = av_frame_alloc();
if (!av_frame)
{
cout << "
unable to release the avframe resources";
exit(1);
}
AVFrame* outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
if (!outFrame)
{
cout << "
unable to release the avframe resources for outframe";
exit(1);
}
//int video_outbuf_size;
//int nbytes = av_image_get_buffer_size(av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32);
//uint8_t* video_outbuf = (uint8_t*)av_malloc(nbytes);
//if (video_outbuf == NULL)
//{
// cout << "
unable to allocate memory";
// exit(1);
//}
av_frame->width = avcodec_contx->width;
av_frame->height = avcodec_contx->height;
av_frame->format = av_codec_par_in->format;
outFrame->width = av_cntx_out->width;
outFrame->height = av_cntx_out->height;
outFrame->format = av_codec_par_out->format;
av_frame_get_buffer(av_frame, 0);
av_frame_get_buffer(outFrame, 0);
//value = av_image_fill_arrays(outFrame->data, outFrame->linesize, video_outbuf, av_cntx_out->pix_fmt, av_cntx_out->width, av_cntx_out->height, 32); // returns : the size in bytes required for src
//if (value < 0)
//{
// cout << "
error in filling image array";
//}
// NOTE(review): the sws_alloc_context()/sws_init_context() result is
// immediately overwritten by sws_getContext(), leaking the first context.
SwsContext* swsCtx = sws_alloc_context();
if (sws_init_context(swsCtx, NULL, NULL) < 0)
{
cout << "
Unable to Initialize the swscaler context sws_context.";
exit(1);
}
swsCtx = sws_getContext(avcodec_contx->width, avcodec_contx->height, avcodec_contx->pix_fmt,
av_cntx_out->width, av_cntx_out->height, av_cntx_out->pix_fmt,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (swsCtx == NULL)
{
cout << "
Cannot allocate SWC Context";
exit(1);
}
int ii = 0;
int enc_packet_counter = 0; //Count encoded frames.
//cout << "
enter No. of frames to capture : ";
//cin >> no_frames;
//int flag;
int frameFinished;
//int got_picture;
int frame_index = 0;
AVPacket* outPacket = av_packet_alloc();
// Per-frame timing instrumentation: measure the wall-clock gap between
// consecutive av_read_frame() returns and average it at the end.
auto start = chrono::system_clock::now();
uint64_t sum_elapsed_time_usec = 0;
int elapsed_time_counter = 0;
int j = 0;
// Main capture loop: read, decode, convert, encode, mux; stops after
// `no_frames` captured frames.
while (av_read_frame(ifmt_ctx, av_pkt) >= 0)
{
int elapsed_time_usec = (int)chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now() - start).count();
start = chrono::system_clock::now();
if (ii > 0)
{
printf("elapsed_time_usec = %d
", elapsed_time_usec);
sum_elapsed_time_usec += (uint64_t)elapsed_time_usec;
elapsed_time_counter++;
}
if (ii++ == no_frames)
break;
// Only process packets that belong to the captured video stream.
if (av_pkt->stream_index == VideoStreamIndx)
{
//value = AvCodecDecodeVideo2(avcodec_contx, av_frame, &frameFinished, av_pkt);
//if (value < 0)
//{
// cout << "unable to decode video";
// exit(1);
//}
int ret = avcodec_send_packet(avcodec_contx, av_pkt);
if (ret < 0)
{
printf("Error while sending packet");
}
frameFinished = true;
int response = 0;
//av_frame_unref(av_frame); //???
//do
//{
// NOTE(review): AVERROR(EAGAIN) (no frame ready yet) is treated as a
// hard error here and the frame is skipped.
response = avcodec_receive_frame(avcodec_contx, av_frame);
if (response < 0) //&& (response != AVERROR(EAGAIN)) && (response != AVERROR_EOF))
{
printf("Error while receiving frame from decoder");
frameFinished = false;
}
//}
//while (response == AVERROR(EAGAIN));
if (frameFinished)// Frame successfully decoded :)
{
//av_init_packet(outPacket); //error C4996: 'av_init_packet': was declared deprecated
memset(outPacket, 0, sizeof(AVPacket)); //???
//int iHeight =sws_scale(swsCtx, av_frame->data, av_frame->linesize, 0, avcodec_contx->height, outFrame->data, outFrame->linesize);
outPacket->data = NULL; // packet data will be allocated by the encoder
outPacket->size = 0;
// Timestamps are derived from the encoded-frame COUNTER (one tick per
// frame in the 1/framerate time base), rescaled to the stream time base.
outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base);
// NOTE(review): the conditional below is immediately overridden by the
// unconditional dts assignment that follows — it is redundant.
if (outPacket->dts != AV_NOPTS_VALUE)
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base);
outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base);
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base);
outFrame->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base);
// NOTE(review): pkt_duration is set to the rescaled COUNTER (grows each
// frame); a constant per-frame duration was probably intended.
outFrame->pkt_duration = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base);
enc_packet_counter++;
//Apply color space conversion from BGRA to YUV420p using sws_scale
////////////////////////////////////////////////////////////////
int sts = sws_scale(swsCtx, //struct SwsContext *c,
av_frame->data, //const uint8_t *const srcSlice[],
av_frame->linesize, //const int srcStride[],
0, //int srcSliceY,
av_frame->height, //int srcSliceH,
outFrame->data, //uint8_t *const dst[],
outFrame->linesize); //const int dstStride[]);
if (sts < 0)
{
printf("Error while executing sws_scale");
}
////////////////////////////////////////////////////////////////
// Encode: send_frame sits at the BOTTOM of this do/while; the loop
// repeats while send_frame returns non-zero (EAGAIN drains pending
// packets via receive_packet and writes them to the muxer).
int ret = 0;
do
{
if (ret == AVERROR(EAGAIN))
{
av_packet_unref(outPacket);
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (ret) break; // deal with error
outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base);
av_write_frame(ofmt_ctx, outPacket);
}
else if (ret != 0)
{
char str2[] = "";
cout << "
Error :" << av_make_error_string(str2, sizeof(str2), ret);
return -1;
}
ret = avcodec_send_frame(av_cntx_out, outFrame);
} while (ret);
} // frameFinished
}
}// End of while-loop
// flush the rest of the packets
// Drain the encoder: sending NULL enters flush mode; receive until EOF.
// The encoder-provided timestamps are kept as-is (see the bug note below).
////////////////////////////////////////////////////////////
int ret = 0;
avcodec_send_frame(av_cntx_out, NULL);
do
{
av_packet_unref(outPacket);
ret = avcodec_receive_packet(av_cntx_out, outPacket);
if (!ret)
{
//outPacket->pts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base); <--- Bug: We shouldn't set PTS, DTS, and duration when flushing.
//outPacket->dts = av_rescale_q(enc_packet_counter, av_cntx_out->time_base, video_stream->time_base);
//outPacket->duration = av_rescale_q(1, av_cntx_out->time_base, video_stream->time_base);
av_write_frame(ofmt_ctx, outPacket);
//enc_packet_counter++;
}
} while (!ret);
////////////////////////////////////////////////////////////
value = av_write_trailer(ofmt_ctx);
if (value < 0)
{
cout << "
error in writing av trailer";
exit(1);
}
//THIS WAS ADDED LATER
/*av_free(video_outbuf);*/
// Teardown. NOTE(review): avformat_close_input() sets ifmt_ctx to NULL on
// success, so the success branch is the expected path and the subsequent
// avformat_free_context(NULL) is a no-op.
avformat_close_input(&ifmt_ctx);
if (!ifmt_ctx)
{
cout << "
file closed successfully";
}
else
{
cout << "
unable to close the file";
exit(1);
}
avformat_free_context(ifmt_ctx);
if (!ifmt_ctx)
{
cout << "
avformat free successfully";
}
else
{
cout << "
unable to free avformat context";
exit(1);
}
//Free codec context.
////////////////////////////////////////////////////////////
avcodec_free_context(&av_cntx_out);
if (!av_cntx_out)
{
cout << "
avcodec free successfully";
}
else
{
cout << "
unable to free avcodec context";
exit(1);
}
////////////////////////////////////////////////////////////
// Report the mean inter-frame capture interval (~33333 us expected at 30 fps).
printf("
Average sum_elapsed_time_usec = %lf
", (double)sum_elapsed_time_usec / (double)elapsed_time_counter);
return 0;
}
控制台输出(移动窗口时):
[gdigrab @ 000001f0b7a461c0] Capturing whole desktop as 800x600x32 at (20,40)
[libx264 @ 000001f0b7a4de80] using SAR=4/3
[libx264 @ 000001f0b7a4de80] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX
[libx264 @ 000001f0b7a4de80] profile High, level 3.1, 4:2:0, 8-bit
elapsed_time_usec = 3402
elapsed_time_usec = 3235
elapsed_time_usec = 19030
elapsed_time_usec = 33324
elapsed_time_usec = 33684
elapsed_time_usec = 33056
elapsed_time_usec = 49883
elapsed_time_usec = 18250
elapsed_time_usec = 48033
elapsed_time_usec = 16783
elapsed_time_usec = 49308
elapsed_time_usec = 17603
elapsed_time_usec = 32805
elapsed_time_usec = 50674
elapsed_time_usec = 16825
elapsed_time_usec = 32926
elapsed_time_usec = 34245
elapsed_time_usec = 34179
elapsed_time_usec = 32208
elapsed_time_usec = 33116
elapsed_time_usec = 33392
elapsed_time_usec = 32914
elapsed_time_usec = 34059
elapsed_time_usec = 36754
elapsed_time_usec = 45127
elapsed_time_usec = 17326
elapsed_time_usec = 33996
elapsed_time_usec = 32753
elapsed_time_usec = 33168
elapsed_time_usec = 33474
elapsed_time_usec = 36049
elapsed_time_usec = 50184
elapsed_time_usec = 14186
elapsed_time_usec = 35353
elapsed_time_usec = 73929
elapsed_time_usec = 54520
elapsed_time_usec = 28031
elapsed_time_usec = 26223
...
...
elapsed_time_usec = 49823
elapsed_time_usec = 19345
elapsed_time_usec = 47076
elapsed_time_usec = 17855
elapsed_time_usec = 33107
elapsed_time_usec = 33541
elapsed_time_usec = 49821
elapsed_time_usec = 18044
elapsed_time_usec = 47415
elapsed_time_usec = 16301
elapsed_time_usec = 33777
elapsed_time_usec = 34473
elapsed_time_usec = 49104
elapsed_time_usec = 16492
elapsed_time_usec = 33834
elapsed_time_usec = 32695
elapsed_time_usec = 32798
elapsed_time_usec = 33862
file closed successfully
avformat free successfully[libx264 @ 000001f0b7a4de80] frame I:203 Avg QP: 0.90 size:125931
[libx264 @ 000001f0b7a4de80] frame P:371 Avg QP: 2.60 size: 46555
[libx264 @ 000001f0b7a4de80] frame B:426 Avg QP: 3.74 size: 2560
[libx264 @ 000001f0b7a4de80] consecutive B-frames: 31.9% 16.8% 51.3%
[libx264 @ 000001f0b7a4de80] mb I I16..4: 63.3% 0.4% 36.3%
[libx264 @ 000001f0b7a4de80] mb P I16..4: 17.1% 0.3% 11.9% P16..4: 5.5% 1.0% 0.8% 0.0% 0.0% skip:63.4%
[libx264 @ 000001f0b7a4de80] mb B I16..4: 0.5% 0.0% 0.3% B16..8: 4.1% 0.1% 0.2% direct: 0.3% skip:94.5% L0:37.4% L1:60.1% BI: 2.5%
[libx264 @ 000001f0b7a4de80] final ratefactor: -14.26
[libx264 @ 000001f0b7a4de80] 8x8 transform intra:0.6% inter:23.5%
[libx264 @ 000001f0b7a4de80] coded y,uvDC,uvAC intra: 27.4% 38.7% 38.6% inter: 3.1% 2.9% 2.8%
[libx264 @ 000001f0b7a4de80] i16 v,h,dc,p: 76% 23% 1% 0%
[libx264 @ 000001f0b7a4de80] i8 v,h,dc,ddl,ddr,vr,hd,vl,hu: 25% 24% 41% 1% 1% 3% 1% 0% 3%
[libx264 @ 000001f0b7a4de80] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 35% 36% 12% 2% 3% 3% 4% 2% 4%
[libx264 @ 000001f0b7a4de80] i8c dc,h,v,p: 64% 23% 12% 2%
[libx264 @ 000001f0b7a4de80] Weighted P-Frames: Y:0.0% UV:0.0%
[libx264 @ 000001f0b7a4de80] ref P L0: 69.7% 9.3% 15.4% 5.6%
[libx264 @ 000001f0b7a4de80] ref B L0: 91.7% 7.0% 1.4%
[libx264 @ 000001f0b7a4de80] kb/s:3.51
avcodec free successfully
Average sum_elapsed_time_usec = 33848.571000
有波动,但平均值约为33毫秒。
使用FFprobe验证时间戳的正确性:
ffprobe -i new_out.mp4 -show_packets > 1.txt
我看不到任何问题:
[PACKET]
codec_type=video
stream_index=0
pts=0
pts_time=0.000000
dts=-6000
dts_time=-0.066667
duration=3000
duration_time=0.033333
size=6069
pos=48
flags=K_
[/PACKET]
[PACKET]
codec_type=video
stream_index=0
pts=9000
pts_time=0.100000
dts=-3000
dts_time=-0.033333
duration=3000
duration_time=0.033333
size=7728
pos=6117
flags=__
[/PACKET]
[PACKET]
codec_type=video
stream_index=0
pts=3000
pts_time=0.033333
dts=0
dts_time=0.000000
duration=3000
duration_time=0.033333
size=151
pos=13845
flags=__
[/PACKET]
[PACKET]
codec_type=video
stream_index=0
pts=6000
pts_time=0.066667
dts=3000
dts_time=0.033333
duration=3000
duration_time=0.033333
size=78
pos=13996
flags=__
[/PACKET]
[PACKET]
codec_type=video
stream_index=0
pts=12000
pts_time=0.133333
dts=6000
dts_time=0.066667
duration=3000
duration_time=0.033333
size=1684
pos=14074
flags=__
[/PACKET]
[PACKET]
codec_type=video
stream_index=0
pts=15000
pts_time=0.166667
dts=9000
dts_time=0.100000
duration=3000
duration_time=0.033333
size=887
pos=15758
flags=__
[/PACKET]
[PACKET]
codec_type=video
stream_index=0
pts=18000
pts_time=0.200000
dts=12000
dts_time=0.133333
duration=3000
duration_time=0.033333
size=17105
pos=16645
flags=__
[/PACKET]
[PACKET]
codec_type=video
stream_index=0
pts=21000
pts_time=0.233333
dts=15000
dts_time=0.166667
duration=3000
duration_time=0.033333
size=26745
pos=33750
flags=__
[/PACKET]
相关文章