/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2014 Andrey Utkin
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
 */

/**
 * @file
 * API example for demuxing, decoding, filtering, encoding and muxing
 * @example transcoding.c
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
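
/* Per-stream state: each audio/video stream gets its own decoder/encoder pair
 * (StreamContext) and its own filter graph (FilteringContext), set up by
 * open_input_file(), open_output_file() and init_filters() below. */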
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;

typedef struct StreamContext {
    AVCodecContext *dec_ctx;
    AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx;

static int open_input_file(const char *filename)
{
    int ret;
    unsigned int i;

    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
    if (!stream_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *stream = ifmt_ctx->streams[i];
        AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
        AVCodecContext *codec_ctx;
        if (!dec) {
            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
            return AVERROR_DECODER_NOT_FOUND;
        }
        codec_ctx = avcodec_alloc_context3(dec);
        if (!codec_ctx) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
            return AVERROR(ENOMEM);
        }
        ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                   "for stream #%u\n", i);
            return ret;
        }
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
            /* Open decoder */
            ret = avcodec_open2(codec_ctx, dec, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
        stream_ctx[i].dec_ctx = codec_ctx;
    }

    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}
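
/* Open the output file and create one output stream per input stream: for each
 * audio/video stream an encoder with the same codec and properties as the
 * decoder is opened; other streams are copied (remuxed) as-is. Finally the
 * muxer is initialized and the file header is written. */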
static int open_output_file(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = ifmt_ctx->streams[i];
        dec_ctx = stream_ctx[i].dec_ctx;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            enc_ctx = avcodec_alloc_context3(encoder);
            if (!enc_ctx) {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                return AVERROR(ENOMEM);
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                if (encoder->pix_fmts)
                    enc_ctx->pix_fmt = encoder->pix_fmts[0];
                else
                    enc_ctx->pix_fmt = dec_ctx->pix_fmt;
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
            } else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
            }

            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open encoder for stream #%u\n", i);
                return ret;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                return ret;
            }

            out_stream->time_base = enc_ctx->time_base;
            stream_ctx[i].enc_ctx = enc_ctx;
        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%u is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        } else {
            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                return ret;
            }
            out_stream->time_base = in_stream->time_base;
        }
    }

    av_dump_format(ofmt_ctx, 0, filename, 1);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'\n", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}
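
/* Build one filter graph of the form buffer source -> filter_spec -> buffer sink,
 * configuring the source with the decoder's properties and constraining the sink
 * to the formats the encoder expects. */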
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
        AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    const AVFilter *buffersrc = NULL;
    const AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->time_base.num, dec_ctx->time_base.den,
                dec_ctx->sample_aspect_ratio.num,
                dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                av_get_sample_fmt_name(dec_ctx->sample_fmt),
                dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                (uint8_t*)&enc_ctx->channel_layout, sizeof(enc_ctx->channel_layout),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                    &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}
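
/* Allocate one FilteringContext per input stream and set up a passthrough
 * ("null"/"anull") filter graph for every audio/video stream. */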
static int init_filters(void)
{
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx  = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph   = NULL;
        if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
                || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;

        if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */
        ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
                stream_ctx[i].enc_ctx, filter_spec);
        if (ret)
            return ret;
    }
    return 0;
}
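
/* Encode one filtered frame (pass filt_frame == NULL to flush the encoder),
 * rescale the packet timestamps to the output stream time base and mux it. */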
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame)
{
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
         AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
            filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
                         stream_ctx[stream_index].enc_ctx->time_base,
                         ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}
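
/* Feed one decoded frame (or NULL to flush) into the stream's filter graph and
 * encode/mux every frame the graph outputs. */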
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    int ret;
    AVFrame *filt_frame;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
            frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
                filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }

        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(filt_frame, stream_index, NULL);
        if (ret < 0)
            break;
    }

    return ret;
}
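
/* Drain frames buffered inside an encoder that has the AV_CODEC_CAP_DELAY
 * capability by calling encode_write_frame() with a NULL frame until the
 * encoder stops producing packets. */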
static int flush_encoder(unsigned int stream_index)
{
    int ret;
    int got_frame;

    if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
                AV_CODEC_CAP_DELAY))
        return 0;

    while (1) {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}
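
/* Demux the input, transcode audio/video packets through their per-stream
 * filter graphs and remux everything else, then flush filters and encoders.
 * A typical build command (library paths may differ on your system) is:
 *   gcc transcoding.c -o transcoding \
 *       $(pkg-config --cflags --libs libavformat libavcodec libavfilter libavutil)
 * Usage: ./transcoding <input file> <output file> */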
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode & filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 stream_ctx[stream_index].dec_ctx->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = frame->best_effort_timestamp;
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_packet_unref(&packet);
    av_frame_free(&frame);
    /* NULL checks guard the cleanup when an early open/alloc failure jumped here
     * before these contexts were created */
    for (i = 0; ifmt_ctx && i < ifmt_ctx->nb_streams; i++) {
        if (stream_ctx)
            avcodec_free_context(&stream_ctx[i].dec_ctx);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i]
                && stream_ctx && stream_ctx[i].enc_ctx)
            avcodec_free_context(&stream_ctx[i].enc_ctx);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    av_free(stream_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}