
Optimizing FFmpeg's avformat_find_stream_info function

Background
        For typical on-demand scenarios where start-up speed is not critical, you can limit the probing delay and the amount of data probed, as shown below:
    pFormatContext->probesize = 500 * 1024;
    pFormatContext->max_analyze_duration = 5 * AV_TIME_BASE; // AV_TIME_BASE is FFmpeg's time unit and represents 1 second
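
The same limits can also be handed to avformat_open_input as AVOptions instead of being written into the struct fields. A minimal sketch, where the UDP address is only a placeholder and error handling is trimmed:

    AVFormatContext *pFormatContext = avformat_alloc_context();
    AVDictionary *pOpts = NULL;
    av_dict_set(&pOpts, "probesize", "512000", 0);        // bytes of data to probe
    av_dict_set(&pOpts, "analyzeduration", "5000000", 0); // microseconds, i.e. 5 * AV_TIME_BASE

    if (avformat_open_input(&pFormatContext, "udp://127.0.0.1:1234", NULL, &pOpts) < 0) {
        av_dict_free(&pOpts);
        return -1;
    }
    av_dict_free(&pOpts);

    if (avformat_find_stream_info(pFormatContext, NULL) < 0)
        return -1;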


Drawbacks

        Setting probesize and max_analyze_duration like this does reduce probing time, but at the cost of reliability: sometimes the stream information cannot be detected and playback fails. This tends to happen when packets are lost (for example when the video data is carried over UDP) or when the network is particularly complex and spans several segments.

Is the probed data used for decoding and display?

Analysis of the AVFMT_FLAG_NOBUFFER flag

By default, the data read during probing is buffered and later used for decoding. If you do not want the probed data to be used for display, set the flag like this:
pAVFormatContext->flags |= AVFMT_FLAG_NOBUFFER;
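
A minimal sketch of where the flag has to be applied, assuming the context has already been opened: it must be set before avformat_find_stream_info runs, which is the point where the probed packets would otherwise be queued.

    pAVFormatContext->flags |= AVFMT_FLAG_NOBUFFER;        // drop probed packets instead of buffering them
    if (avformat_find_stream_info(pAVFormatContext, NULL) < 0)
        return -1;                                         // stream info could not be found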

Code analysis
Inside int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options):
        if (ic->flags & AVFMT_FLAG_NOBUFFER)
            free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);

Problem: since this flag was not set, the I-frame data that was fed in should have been kept, yet the decoder clearly complains that the PPS/SPS do not exist. So where did the pushed-in data actually go?

Scenario
    Start-up time must not exceed 1 second, and the camera's video parameters may be specified in advance. Only the video stream is considered for now (an audio stream will be added later); the known input format is video: H264 1920*1080 25fps.

Implementation
    This article uses FFmpeg 3.4; the reference blog post was written against the much older 2.2.0 release, so some functions have been deprecated and others have changed, but the basic idea is unchanged:
replace the avformat_find_stream_info probing step by specifying the codec parameters manually.


AVStream* CDecoder::CreateStream(AVFormatContext* pFormatContext, int nCodecType)
{
    AVStream *st = avformat_new_stream(pFormatContext, NULL);
    if (!st)
        return NULL;

    st->codecpar->codec_type = (AVMediaType)nCodecType;
    return st;
}



int CDecoder::GetVideoExtraData(AVFormatContext* pFormatContext, int nVideoIndex)
{
    int  type, size, flags, pos, stream_type;
    int ret = -1;
    int64_t dts;
    bool got_extradata = false;

    if (!pFormatContext || nVideoIndex < 0 || nVideoIndex > 2)
        return ret;

    /* Walk the FLV tags; avio_skip(pb, 4) jumps over the 4-byte PreviousTagSize
       field that sits between two tags. */
    for (;; avio_skip(pFormatContext->pb, 4)) {
        pos  = avio_tell(pFormatContext->pb);
        type = avio_r8(pFormatContext->pb);       /* 1-byte tag type */
        size = avio_rb24(pFormatContext->pb);     /* 3-byte data size */
        dts  = avio_rb24(pFormatContext->pb);     /* 3-byte timestamp... */
        dts |= avio_r8(pFormatContext->pb) << 24; /* ...plus 1-byte extension */
        avio_skip(pFormatContext->pb, 3);         /* 3-byte stream id */

        if (0 == size)
            break;

        if (FLV_TAG_TYPE_AUDIO == type || FLV_TAG_TYPE_META == type) {
            /* If audio or meta tags, skip them. */
            avio_seek(pFormatContext->pb, size, SEEK_CUR);
        }
        else if (type == FLV_TAG_TYPE_VIDEO) {
            /* The first video tag carries the sps/pps info: the first 5 bytes are the
               video tag header, the rest is used as extradata. Read it, then break. */
            size -= 5;
            pFormatContext->streams[nVideoIndex]->codecpar->extradata = (uint8_t*)av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (NULL == pFormatContext->streams[nVideoIndex]->codecpar->extradata)
                break;

            memset(pFormatContext->streams[nVideoIndex]->codecpar->extradata, 0, size + FF_INPUT_BUFFER_PADDING_SIZE);
            memcpy(pFormatContext->streams[nVideoIndex]->codecpar->extradata, pFormatContext->pb->buf_ptr + 5, size);
            pFormatContext->streams[nVideoIndex]->codecpar->extradata_size = size;
            ret = 0;
            got_extradata = true;
        }
        else {
            /* Unknown tag type, something is wrong. */
            break;
        }

        if (got_extradata)
            break;
    }

    return ret;
}


int CDecoder::InitDecode(AVFormatContext *pFormatContext)
{
    int video_index = -1;
    int audio_index = -1;
    int ret = -1;

    if (!pFormatContext)
        return ret;

    /* Get the video stream index; if there is no video stream, create it.
       Do the same for audio. */
    if (0 == pFormatContext->nb_streams) {
        CreateStream(pFormatContext, AVMEDIA_TYPE_VIDEO);
        CreateStream(pFormatContext, AVMEDIA_TYPE_AUDIO);
        video_index = 0;
        audio_index = 1;
    }
    else if (1 == pFormatContext->nb_streams) {
        if (AVMEDIA_TYPE_VIDEO == pFormatContext->streams[0]->codecpar->codec_type) {
            CreateStream(pFormatContext, AVMEDIA_TYPE_AUDIO);
            video_index = 0;
            audio_index = 1;
        }
        else if (AVMEDIA_TYPE_AUDIO == pFormatContext->streams[0]->codecpar->codec_type) {
            CreateStream(pFormatContext, AVMEDIA_TYPE_VIDEO);
            video_index = 1;
            audio_index = 0;
        }
    }
    else if (2 == pFormatContext->nb_streams) {
        if (AVMEDIA_TYPE_VIDEO == pFormatContext->streams[0]->codecpar->codec_type) {
            video_index = 0;
            audio_index = 1;
        }
        else if (AVMEDIA_TYPE_VIDEO == pFormatContext->streams[1]->codecpar->codec_type) {
            video_index = 1;
            audio_index = 0;
        }
    }

    /* Error: no video stream was found. */
    if (video_index != 0 && video_index != 1)
        return ret;

    // Init the audio codec (AAC).
    pFormatContext->streams[audio_index]->codecpar->codec_id = AV_CODEC_ID_AAC;
    pFormatContext->streams[audio_index]->codecpar->sample_rate = 44100;
    pFormatContext->streams[audio_index]->codecpar->bits_per_coded_sample = 16;
    pFormatContext->streams[audio_index]->codecpar->channels = 2;
    pFormatContext->streams[audio_index]->codecpar->channel_layout = 3;   /* AV_CH_LAYOUT_STEREO */
    pFormatContext->streams[audio_index]->pts_wrap_bits = 32;
    pFormatContext->streams[audio_index]->time_base.den = 1000;
    pFormatContext->streams[audio_index]->time_base.num = 1;

    // Init the video codec (H264).
    pFormatContext->streams[video_index]->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    pFormatContext->streams[video_index]->codecpar->codec_id = AV_CODEC_ID_H264;
    pFormatContext->streams[video_index]->codecpar->format = 12;          /* AV_PIX_FMT_YUVJ420P */
    pFormatContext->streams[video_index]->codecpar->bits_per_raw_sample = 8;
    pFormatContext->streams[video_index]->codecpar->profile = 66;         /* Baseline */
    pFormatContext->streams[video_index]->codecpar->level = 42;           /* level 4.2 */
    pFormatContext->streams[video_index]->codecpar->width = 1920;
    pFormatContext->streams[video_index]->codecpar->height = 1080;
    pFormatContext->streams[video_index]->codecpar->sample_aspect_ratio.num = 0;
    pFormatContext->streams[video_index]->codecpar->sample_aspect_ratio.den = 1;

    pFormatContext->streams[video_index]->pts_wrap_bits = 64;
    pFormatContext->streams[video_index]->time_base.den = 1200000;
    pFormatContext->streams[video_index]->time_base.num = 1;
    pFormatContext->streams[video_index]->avg_frame_rate.den = 1;
    pFormatContext->streams[video_index]->avg_frame_rate.num = 25;
    /* Needs changing: different streams have different frame rates. 'r_frame_rate' is new in ffmpeg 2.3.3. */
    pFormatContext->streams[video_index]->r_frame_rate.den = 25;
    pFormatContext->streams[video_index]->r_frame_rate.num = 1;
    /* H264 needs sps/pps for decoding, so read it from the first video tag. */
    ret = GetVideoExtraData(pFormatContext, video_index);

    /* Update the AVFormatContext info. */
    pFormatContext->nb_streams = 1;
    /* Empty the buffer. */
    pFormatContext->pb->buf_ptr = pFormatContext->pb->buf_end;

    return ret;
}


Original approach

AVDictionary* pOptions = NULL;
pFormatCtx->probesize = 200 * 1024;
pFormatCtx->max_analyze_duration = 3 * AV_TIME_BASE;

// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, &pOptions) < 0)
{
    return -1; // Couldn't find stream information
}


New approach

InitDecode(pFormatCtx);
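
A minimal sketch of how the new scheme might be wired together; m_pAvioBuf, BUF_SIZE and ReadStreamData are the custom-IO pieces shown later in this article, the empty URL is a placeholder, and this is an outline rather than the exact implementation:

    AVFormatContext *pFormatCtx = avformat_alloc_context();
    AVIOContext *pIoCtx = avio_alloc_context(m_pAvioBuf, BUF_SIZE, 0, this,
                                             ReadStreamData, NULL, NULL);
    pFormatCtx->pb = pIoCtx;                 // feed the demuxer through the custom read callback

    if (avformat_open_input(&pFormatCtx, "", NULL, NULL) < 0)
        return -1;

    if (InitDecode(pFormatCtx) < 0)          // replaces avformat_find_stream_info()
        return -1;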


Caveat

        The solution above allocates memory with av_malloc but never frees it explicitly. Whether that buffer is released together with pFormatCtx, so that it does not leak, has not been verified; admittedly, in testing, execution never got that far.
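
If you prefer not to rely on the context teardown to release it, here is a minimal sketch of an explicit cleanup, assuming video_index still identifies the stream that GetVideoExtraData filled and that this runs only after decoding has finished, before pFormatCtx itself is freed:

    AVCodecParameters *pCodecPar = pFormatCtx->streams[video_index]->codecpar;
    av_freep(&pCodecPar->extradata);        // av_freep() also resets the pointer to NULL
    pCodecPar->extradata_size = 0;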


Optimization result

In testing, start-up was roughly 1200 milliseconds faster.

Test results
    When probing an elementary stream (ES), avformat_open_input returns very quickly; PS streams, however, are the exception. With av_log_set_callback configured to write the log to a file,
the following log is printed while avformat_open_input probes the PS input format:
Probing mp3 score:1 size:2048
Probing mp3 score:1 size:4096
Probing mp3 score:1 size:8192
Probing mp3 score:1 size:16384
Probing h264 score:51 size:32768
Format h264 probed with size=32768 and score=51
Input #0, h264, from '':
  Duration: N/A, bitrate: N/A
    Stream #0:0, 0, 1/1200000: Video: h264 (Baseline), yuvj420p, 1920x1080, 25 fps, 0.04 tbr, 1200k tbn
deprecated pixel format used, make sure you did set range correctly
non-existing PPS 0 referenced
non-existing PPS 0 referenced
nal_unit_type: 1, nal_ref_idc: 3
non-existing PPS 0 referenced
non-existing PPS 0 referenced
decode_slice_header error
non-existing PPS 0 referenced
non-existing PPS 0 referenced
non-existing PPS 0 referenced
no frame!

Tracing the source shows that the "Probing h264 score:51 size:32768" line is printed while calling
av_probe_input_format3, which reports
name h264
long_name raw H.264 video
raw_codec_id 28
so if the AVInputFormat structure is specified up front, the time spent probing the stream format can be saved.
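
A minimal sketch of skipping the format probe by forcing the raw H.264 demuxer; the empty URL stands for the custom-IO input used above:

    // With a forced demuxer, the format-probing step is skipped.
    AVInputFormat *pInputFormat = av_find_input_format("h264");
    if (!pInputFormat)
        return -1;
    if (avformat_open_input(&pFormatCtx, "", pInputFormat, NULL) < 0)
        return -1;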

Latency optimization in ffmpeg for a known H.264 ES stream
Following http://blog.csdn.net/rain_2011_kai/article/details/7746805, the question is whether knowing only the codec ID of the sender's video plus the frame width and height is enough to skip
FFmpeg's stream-probing calls entirely: avformat_open_input
plus avformat_find_stream_info take more than 500 milliseconds.

Manually specifying the decode format: little effect

Debugger watch of the AVIOContext:
+ buffer   0x00000000002cbdc0  <Invalid characters in string.>  unsigned char *
+ buf_end  0x00000000002d3626  <Invalid characters in string.>  unsigned char *

pos              262349
buffer           2932160
buf_end          2962982
buffer_size      35840
buf_end - buffer 30822

Where the value of pos comes from is worth investigating.

pFormatContext->pb->pos = pFormatContext->pb->buf_end;

This line does not even compile in the current version, because pos is a 64-bit integer while buf_end is a byte pointer.

The numbers above still do not reveal the relationship between them; even with the decode format specified manually, the effect is not satisfactory.


One frame of data is also read here:

m_pVideoc->io_ctx = avio_alloc_context(m_pAvioBuf, BUF_SIZE, 0, this, ReadStreamData, NULL, NULL);

BUF_SIZE is set to 4*1024; whether this actually makes a difference is still unknown.
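
For reference, a minimal sketch of the read callback that avio_alloc_context expects; the CDecoder member and the FillBuffer data source are assumptions, not the article's actual code:

    static int ReadStreamData(void *opaque, uint8_t *buf, int buf_size)
    {
        CDecoder *pDecoder = static_cast<CDecoder*>(opaque);    // 'this' passed to avio_alloc_context
        int nRead = pDecoder->FillBuffer(buf, buf_size);        // hypothetical network/queue source
        if (nRead <= 0)
            return AVERROR_EOF;                                 // signal end of stream / no data
        return nRead;
    }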




References

http://blog.csdn.net/STN_LCD/article/details/74935760

https://jiya.io/archives/vlc_optimize_1.html

Printing stream details with av_dump_format

Function description
        avformat_find_stream_info is normally used to probe the stream format; its job is to fill pFormatContext->streams with the correct audio and video format information. av_dump_format can then output that format information through av_log to a file or to the console, which helps the developer understand the input audio/video format. Removing the call has no effect on the rest of the program.
/**
 * Print detailed information about the input or output format, such as
 * duration, bitrate, streams, container, programs, metadata, side data,
 * codec and time base.
 *
 * @param ic        the context to analyze
 * @param index     index of the stream to dump information about
 * @param url       the URL to print, such as source or destination file
 * @param is_output Select whether the specified context is an input(0) or output(1)
 */
void av_dump_format(AVFormatContext *ic,
                    int index,
                    const char *url,
                    int is_output);

Calling code
        av_dump_format(pFormatContext, 0, "", 0);
Note: the last parameter is 0 for an input stream and 1 for an output stream.
With av_log_set_callback redirecting the log to a file, the output looks like this:
Input #0, h264, from '':
  Duration: N/A, bitrate: N/A
    Stream #0:0, 0, 1/1200000: Video: h264 (Baseline), yuvj420p, 1920x1080, 25 fps, 0.04 tbr, 1200k tbn
No total duration is printed because this is raw H.264 elementary-stream data.
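
A minimal sketch of the log redirection mentioned above, assuming <stdio.h> and <stdarg.h> are included and the log file name is a placeholder:

static FILE *g_pLogFile = NULL;

static void LogToFile(void *avcl, int level, const char *fmt, va_list vl)
{
    if (level > av_log_get_level() || !g_pLogFile)
        return;
    vfprintf(g_pLogFile, fmt, vl);          // write the raw format string; no prefix handling
    fflush(g_pLogFile);
}

// During initialisation:
g_pLogFile = fopen("ffmpeg_probe.log", "a");
av_log_set_callback(LogToFile);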

Source code walk-through
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
{
    int i;
    uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
    if (ic->nb_streams && !printed)
        return;
// If is_output is 0, this is an input context.
    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
           is_output ? "Output" : "Input",
           index,
           is_output ? ic->oformat->name : ic->iformat->name,
           is_output ? "to" : "from", url);
    dump_metadata(NULL, ic->metadata, "  ");
// For input contexts (is_output == 0), compute and print the total duration.
    if (!is_output) {
        av_log(NULL, AV_LOG_INFO, "  Duration: ");
// If the duration field in the context is valid, this is not a raw h264 stream (raw h264 carries no dts/pts data), so compute the total duration.
        if (ic->duration != AV_NOPTS_VALUE) {
            int hours, mins, secs, us;
            int64_t duration = ic->duration + (ic->duration <= INT64_MAX - 5000 ? 5000 : 0);
            secs  = duration / AV_TIME_BASE;
            us    = duration % AV_TIME_BASE;
            mins  = secs / 60;
            secs %= 60;
            hours = mins / 60;
            mins %= 60;
            av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
                   (100 * us) / AV_TIME_BASE);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        // Check whether the start time is valid.
        if (ic->start_time != AV_NOPTS_VALUE) {
            int secs, us;
            av_log(NULL, AV_LOG_INFO, ", start: ");
            secs = llabs(ic->start_time / AV_TIME_BASE);
            us   = llabs(ic->start_time % AV_TIME_BASE);
            av_log(NULL, AV_LOG_INFO, "%s%d.%06d",
                   ic->start_time >= 0 ? "" : "-",
                   secs,
                   (int) av_rescale(us, 1000000, AV_TIME_BASE));
        }
        av_log(NULL, AV_LOG_INFO, ", bitrate: ");
        if (ic->bit_rate)
            av_log(NULL, AV_LOG_INFO, "%"PRId64" kb/s", ic->bit_rate / 1000);
        else
            av_log(NULL, AV_LOG_INFO, "N/A");
        av_log(NULL, AV_LOG_INFO, "\n");
    }
    for (i = 0; i < ic->nb_chapters; i++) {
        AVChapter *ch = ic->chapters[i];
        av_log(NULL, AV_LOG_INFO, "    Chapter #%d:%d: ", index, i);
        av_log(NULL, AV_LOG_INFO,
               "start %f, ", ch->start * av_q2d(ch->time_base));
        av_log(NULL, AV_LOG_INFO,
               "end %f\n", ch->end * av_q2d(ch->time_base));
        dump_metadata(NULL, ch->metadata, "    ");
    }
    if (ic->nb_programs) {
        int j, k, total = 0;
        for (j = 0; j < ic->nb_programs; j++) {
            AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
                                                  "name", NULL, 0);
            av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
                   name ? name->value : "");
            dump_metadata(NULL, ic->programs[j]->metadata, "    ");
            for (k = 0; k < ic->programs[j]->nb_stream_indexes; k++) {
                dump_stream_format(ic, ic->programs[j]->stream_index[k],
                                   index, is_output);
                printed[ic->programs[j]->stream_index[k]] = 1;
            }
            total += ic->programs[j]->nb_stream_indexes;
        }
        if (total < ic->nb_streams)
            av_log(NULL, AV_LOG_INFO, "  No Program\n");
    }
    for (i = 0; i < ic->nb_streams; i++)
        if (!printed[i])
            dump_stream_format(ic, i, index, is_output);
    av_free(printed);
}

 

