摘要:基本上就是對(duì)一個(gè)數(shù)據(jù)幀的描述。我理解的是一個(gè)未解碼的壓縮數(shù)據(jù)幀。
read_thread這個(gè)最關(guān)鍵的讀取線程中,逐步跟蹤,可以明確stream_component_open---> decoder_start---> video_thread--->ffplay_video_thread。這個(gè)調(diào)用過程,在解碼開始后的異步解碼線程中,調(diào)用的是ffplay_video_thread。具體可見續(xù)1。這個(gè)函數(shù)是解碼處理視頻的核心:
/* Core video decode thread (call chain: stream_component_open -> decoder_start
 * -> video_thread -> ffplay_video_thread).
 * Loop: pull one decoded AVFrame via get_video_frame(); when avfilter is
 * compiled in, push it through the filter graph (rebuilt whenever frame
 * size/format/serial or the selected vfilter changes); compute pts/duration
 * and hand the frame to queue_picture() for the display queue.
 * Returns 0 on normal exit, AVERROR(ENOMEM) if allocation fails. */
static int ffplay_video_thread(void *arg)
{
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFrame *frame = av_frame_alloc();
    double pts;
    double duration;
    int ret;
    AVRational tb = is->video_st->time_base;   /* stream time base, used to convert pts to seconds */
    AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    /* Cached parameters of the previously seen frame; any change below forces
     * a filter-graph rebuild. */
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = -2;
    int last_serial = -1;
    int last_vfilter_idx = 0;
    if (!graph) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
#else
    ffp_notify_msg2(ffp, FFP_MSG_VIDEO_ROTATION_CHANGED, ffp_get_video_rotate_degrees(ffp));
#endif

    if (!frame) {
#if CONFIG_AVFILTER
        avfilter_graph_free(&graph);
#endif
        return AVERROR(ENOMEM);
    }

    for (;;) {
        ret = get_video_frame(ffp, frame);
        if (ret < 0)
            goto the_end;
        if (!ret)          /* frame dropped by the frame-drop policy; read the next one */
            continue;

#if CONFIG_AVFILTER
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != is->viddec.pkt_serial
            || ffp->vf_changed
            || last_vfilter_idx != is->vfilter_idx) {
            SDL_LockMutex(ffp->vf_mutex);
            ffp->vf_changed = 0;
            av_log(NULL, AV_LOG_DEBUG,
                   "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d ",
                   last_w, last_h,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
                   frame->width, frame->height,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
            /* Frame parameters changed: tear down and rebuild the filter graph. */
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(ffp, graph, is, ffp->vfilters_list ? ffp->vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
                // FIXME: post error
                SDL_UnlockMutex(ffp->vf_mutex);
                goto the_end;
            }
            filt_in = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = frame->format;
            last_serial = is->viddec.pkt_serial;
            last_vfilter_idx = is->vfilter_idx;
            frame_rate = filt_out->inputs[0]->frame_rate;
            SDL_UnlockMutex(ffp->vf_mutex);
        }

        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;

        /* Drain every frame the filter graph can produce for this input. */
        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;

            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->viddec.finished = is->viddec.pkt_serial;
                ret = 0;
                break;
            }

            /* Measure time spent inside the filter; discard outliers. */
            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = filt_out->inputs[0]->time_base;
#endif
            /* Duration of one frame = 1/frame_rate seconds (0 when unknown). */
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
            av_frame_unref(frame);
#if CONFIG_AVFILTER
        }
#endif

        if (ret < 0)
            goto the_end;
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_frame_free(&frame);
    return 0;
}
前面的初始化過程暫不分析,直接看for(;;)開始的這個(gè)循環(huán),1.get_video_frame讀取一幀;2.av_buffersrc_add_frame添加幀到濾鏡緩沖中;3.queue_picture將解碼后的YUV格式幀放入幀隊(duì)列,之后由顯示線程調(diào)用SDL進(jìn)行渲染。大體是這3個(gè)步驟。
雖然前文已有介紹get_video_frame,但是太粗略了,這次仔細(xì)進(jìn)去看下:
static int get_video_frame(FFPlayer *ffp, AVFrame *frame) { VideoState *is = ffp->is; int got_picture; ffp_video_statistic_l(ffp); if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0) return -1; if (got_picture) { double dpts = NAN; if (frame->pts != AV_NOPTS_VALUE) dpts = av_q2d(is->video_st->time_base) * frame->pts; frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame); if (ffp->framedrop>0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) { if (frame->pts != AV_NOPTS_VALUE) { double diff = dpts - get_master_clock(is); if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD && diff - is->frame_last_filter_delay < 0 && is->viddec.pkt_serial == is->vidclk.serial && is->videoq.nb_packets) { is->frame_drops_early++; is->continuous_frame_drops_early++; if (is->continuous_frame_drops_early > ffp->framedrop) { is->continuous_frame_drops_early = 0; } else { av_frame_unref(frame); got_picture = 0; } } } } } return got_picture; }
decoder_decode_frame毫無疑問是個(gè)關(guān)鍵,解碼frame:
/* Pull packets from the decoder's queue and decode until one complete frame
 * (video/audio) or subtitle is produced, the stream drains, or an abort is
 * requested. Returns got_frame (non-zero when a result was produced), or -1
 * on abort. Partial packet consumption state lives in d->pkt_temp. */
static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int got_frame = 0;

    do {
        int ret = -1;

        if (d->queue->abort_request)
            return -1;

        /* Need a fresh packet when none is pending or a seek bumped the serial. */
        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                /* Queue empty: wake the read thread so it refills the queue. */
                if (d->queue->nb_packets == 0)
                    SDL_CondSignal(d->empty_queue_cond);
                if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
                    return -1;
                if (pkt.data == flush_pkt.data) {
                    /* Flush sentinel (queued on start/seek): reset codec and
                     * pts-prediction state. */
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
            av_packet_unref(&d->pkt);
            d->pkt_temp = d->pkt = pkt;   /* pkt_temp is consumed incrementally below */
            d->packet_pending = 1;
        }

        switch (d->avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: {
                ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    /* Track decoded-frames-per-second statistic. */
                    ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
                    if (ffp->decoder_reorder_pts == -1) {
                        /* Auto: let FFmpeg pick the most reliable timestamp. */
                        frame->pts = av_frame_get_best_effort_timestamp(frame);
                    } else if (!ffp->decoder_reorder_pts) {
                        /* Reordering disabled: use the packet dts as pts. */
                        frame->pts = frame->pkt_dts;
                    }
                }
                }
                break;
            case AVMEDIA_TYPE_AUDIO:
                ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    AVRational tb = (AVRational){1, frame->sample_rate};
                    if (frame->pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                    else if (d->next_pts != AV_NOPTS_VALUE)
                        /* No pts on this frame: predict from the previous one. */
                        frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                    if (frame->pts != AV_NOPTS_VALUE) {
                        /* Next pts = this pts + sample count (tb is 1/sample_rate). */
                        d->next_pts = frame->pts + frame->nb_samples;
                        d->next_pts_tb = tb;
                    }
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
                break;
            default:
                break;
        }

        if (ret < 0) {
            /* Decode error: abandon the rest of this packet. */
            d->packet_pending = 0;
        } else {
            d->pkt_temp.dts = d->pkt_temp.pts = AV_NOPTS_VALUE;
            if (d->pkt_temp.data) {
                /* Audio packets may decode partially (ret = bytes consumed);
                 * other media types consume the whole packet. */
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                if (!got_frame) {
                    /* NULL-data packet produced no frame: decoder is drained
                     * for this serial. */
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial;
                }
            }
        }
    } while (!got_frame && !d->finished);

    return got_frame;
}
一個(gè)大循環(huán)(一直到?jīng)]有幀或者結(jié)尾為止)里面套著一個(gè)小循環(huán)和一個(gè)switch case的判斷,以及末尾的一些狀態(tài)更新。先來看小循環(huán):
AVPacket pkt; do { if (d->queue->nb_packets == 0) SDL_CondSignal(d->empty_queue_cond); if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0) return -1; if (pkt.data == flush_pkt.data) { avcodec_flush_buffers(d->avctx); d->finished = 0; d->next_pts = d->start_pts; d->next_pts_tb = d->start_pts_tb; } } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial); av_packet_unref(&d->pkt); d->pkt_temp = d->pkt = pkt; d->packet_pending = 1;
這里看到一個(gè)關(guān)鍵的數(shù)據(jù)結(jié)構(gòu)AVPacket,表示的是音視頻的一個(gè)數(shù)據(jù)幀:
typedef struct AVPacket { /** * A reference to the reference-counted buffer where the packet data is * stored. * May be NULL, then the packet data is not reference-counted. */ AVBufferRef *buf; /** * Presentation timestamp in AVStream->time_base units; the time at which * the decompressed packet will be presented to the user. * Can be AV_NOPTS_VALUE if it is not stored in the file. * pts MUST be larger or equal to dts as presentation cannot happen before * decompression, unless one wants to view hex dumps. Some formats misuse * the terms dts and pts/cts to mean something different. Such timestamps * must be converted to true pts/dts before they are stored in AVPacket. */ int64_t pts; /** * Decompression timestamp in AVStream->time_base units; the time at which * the packet is decompressed. * Can be AV_NOPTS_VALUE if it is not stored in the file. */ int64_t dts; uint8_t *data; int size; int stream_index; /** * A combination of AV_PKT_FLAG values */ int flags; /** * Additional packet data that can be provided by the container. * Packet can contain several types of side information. */ AVPacketSideData *side_data; int side_data_elems; /** * Duration of this packet in AVStream->time_base units, 0 if unknown. * Equals next_pts - this_pts in presentation order. */ int64_t duration; int64_t pos; ///< byte position in stream, -1 if unknown #if FF_API_CONVERGENCE_DURATION /** * @deprecated Same as the duration field, but as int64_t. This was required * for Matroska subtitles, whose duration values could overflow when the * duration field was still an int. */ attribute_deprecated int64_t convergence_duration; #endif } AVPacket;
可以看到有顯示和解碼的時(shí)間戳dts pts,有在網(wǎng)絡(luò)流中的位置pos,實(shí)際數(shù)據(jù)指針data,大小size,所屬流的索引stream_index。基本上就是對(duì)一個(gè)數(shù)據(jù)幀的描述。我理解的是一個(gè)未解碼的壓縮數(shù)據(jù)幀。
回到小循環(huán)里看,packet_queue_get_or_buffering,讀取一個(gè)壓縮數(shù)據(jù)幀:
static int packet_queue_get_or_buffering(FFPlayer *ffp, PacketQueue *q, AVPacket *pkt, int *serial, int *finished) { assert(finished); if (!ffp->packet_buffering) return packet_queue_get(q, pkt, 1, serial); while (1) { int new_packet = packet_queue_get(q, pkt, 0, serial); if (new_packet < 0) return -1; else if (new_packet == 0) { if (q->is_buffer_indicator && !*finished) ffp_toggle_buffering(ffp, 1); new_packet = packet_queue_get(q, pkt, 1, serial); if (new_packet < 0) return -1; } if (*finished == *serial) { av_packet_unref(pkt); continue; } else break; } return 1; }
packet_queue_get是從隊(duì)列中獲取一個(gè)pkt,但是他的參數(shù)不同調(diào)用的含義并不相同:
/* return < 0 if aborted, 0 if no packet and > 0 if packet. */ static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial) { MyAVPacketList *pkt1; int ret; SDL_LockMutex(q->mutex); for (;;) { if (q->abort_request) { ret = -1; break; } pkt1 = q->first_pkt; if (pkt1) { q->first_pkt = pkt1->next; if (!q->first_pkt) q->last_pkt = NULL; q->nb_packets--; q->size -= pkt1->pkt.size + sizeof(*pkt1); q->duration -= pkt1->pkt.duration; *pkt = pkt1->pkt; if (serial) *serial = pkt1->serial; #ifdef FFP_MERGE av_free(pkt1); #else pkt1->next = q->recycle_pkt; q->recycle_pkt = pkt1; #endif ret = 1; break; } else if (!block) { ret = 0; break; } else { SDL_CondWait(q->cond, q->mutex); } } SDL_UnlockMutex(q->mutex); return ret; }
又是個(gè)循環(huán),如果被終止了,直接返回-1。讀取隊(duì)列(其實(shí)是個(gè)鏈表)中的第一個(gè)pkt,然后將其出隊(duì),下一個(gè)成為第一個(gè)。如果沒讀到有2種情況,根據(jù)參數(shù)block(是否阻塞),非阻塞直接返回0,阻塞線程等待條件喚醒,條件符合喚醒后繼續(xù)執(zhí)行循環(huán),從頭開始讀取。
好吧,回來看packet_queue_get_or_buffering,開頭就是一個(gè)判斷,如果未開啟packet緩沖(packet_buffering為0),直接按照阻塞方式讀取pkt并返回(隊(duì)列為空時(shí)線程會(huì)先休眠,直到有數(shù)據(jù)到來后再繼續(xù)處理)。下面的while(1)開始是處理開啟了緩沖的情況。首先進(jìn)行非阻塞讀取,如果被終止,直接返回-1;否則如果沒有pkt,先調(diào)用ffp_toggle_buffering進(jìn)入緩沖狀態(tài),然后再阻塞讀取。那么這個(gè)ffp_toggle_buffering在干什么呢?往下跟蹤2層,是ffp_toggle_buffering_l函數(shù):
void ffp_toggle_buffering_l(FFPlayer *ffp, int buffering_on) { if (!ffp->packet_buffering) return; VideoState *is = ffp->is; if (buffering_on && !is->buffering_on) { av_log(ffp, AV_LOG_DEBUG, "ffp_toggle_buffering_l: start "); is->buffering_on = 1; stream_update_pause_l(ffp); ffp_notify_msg1(ffp, FFP_MSG_BUFFERING_START); } else if (!buffering_on && is->buffering_on){ av_log(ffp, AV_LOG_DEBUG, "ffp_toggle_buffering_l: end "); is->buffering_on = 0; stream_update_pause_l(ffp); ffp_notify_msg1(ffp, FFP_MSG_BUFFERING_END); } }
無論什么情況,大體都會(huì)走stream_update_pause_l,然后進(jìn)行消息通知,好吧,看看stream_update_pause_l,往下走2層是stream_toggle_pause_l:
static void stream_toggle_pause_l(FFPlayer *ffp, int pause_on) { VideoState *is = ffp->is; if (is->paused && !pause_on) { is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated; #ifdef FFP_MERGE if (is->read_pause_return != AVERROR(ENOSYS)) { is->vidclk.paused = 0; } #endif set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial); } else { } set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial); is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = pause_on; SDL_AoutPauseAudio(ffp->aout, pause_on); }
這不是暫停與恢復(fù)的調(diào)用嗎。好吧,咱們回顧一下,也就是說,讀取pkt的過程,會(huì)先讀取緩存,如果有直接返回,如果緩存讀取到的是0,也就是沒內(nèi)容,那么要阻塞在這里,同時(shí)暫停播放,那么也即是咱們?cè)诳匆曨l的時(shí)候出現(xiàn)的緩沖等待的情況了。
回到decoder_decode_frame的小循環(huán)里。小循環(huán)的意思大約是讀取pkt,直到與全局的flush_pkt不相等,我的理解是flush_pkt類似一個(gè)標(biāo)記的作用,用來表示到達(dá)了該解碼的那個(gè)pkt。在此之前循環(huán)尋找緩存中的pkt(不知對(duì)不對(duì),歡迎指正)。
往下繼續(xù)看小循環(huán)之后的switch case,以video的case為例:
case AVMEDIA_TYPE_VIDEO: { ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp); if (got_frame) { ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]"); if (ffp->decoder_reorder_pts == -1) { frame->pts = av_frame_get_best_effort_timestamp(frame); } else if (!ffp->decoder_reorder_pts) { frame->pts = frame->pkt_dts; } } } break;
這里調(diào)用avcodec_decode_video2解碼,傳入剛才的pkt,如果got_frame表明成功解出一幀,則更新解碼幀率統(tǒng)計(jì)(SDL_SpeedSamplerAdd),并根據(jù)decoder_reorder_pts的設(shè)置修正frame的pts。
解碼的過程后續(xù)有機(jī)會(huì)再分析。現(xiàn)在還是有個(gè)疑問,flush_pkt到底是個(gè)什么?我上面的猜測(cè)不知道對(duì)不對(duì)。繼續(xù)找找線索吧。在ffp_global_init中:
av_init_packet(&flush_pkt); flush_pkt.data = (uint8_t *)&flush_pkt;
初始化清空,并且將他的data賦值為自己的地址。有點(diǎn)奇怪,繼續(xù)找:
static void packet_queue_start(PacketQueue *q) { SDL_LockMutex(q->mutex); q->abort_request = 0; packet_queue_put_private(q, &flush_pkt); SDL_UnlockMutex(q->mutex); }
在初始化隊(duì)列的時(shí)候就加入了這個(gè)都是空的pkt。那么之前的小循環(huán)的地方是否可理解為讀取pkt,直到緩存隊(duì)列中沒東西為止?不敢肯定,這里先留個(gè)疑問吧。
文章版權(quán)歸作者所有,未經(jīng)允許請(qǐng)勿轉(zhuǎn)載,若此文章存在違規(guī)行為,您可以聯(lián)系管理員刪除。
轉(zhuǎn)載請(qǐng)注明本文地址:http://m.specialneedsforspecialkids.com/yun/66752.html
摘要:分別為音頻視頻和字幕進(jìn)行相關(guān)處理。向下跟蹤兩層,會(huì)發(fā)現(xiàn),核心函數(shù)是。至此解碼算完了。整個(gè)過程真是粗略分析啊,對(duì)自己也很抱歉,暫時(shí)先這樣吧。 上文中說到在read_thread線程中有個(gè)關(guān)鍵函數(shù):avformat_open_input(utils.c),應(yīng)當(dāng)是讀取視頻文件的,這個(gè)函數(shù)屬于ffmpeg層。這回進(jìn)入到其中去看下: int avformat_open_input(AVForma...
摘要:我們下面先從讀取線程入手。無論這個(gè)循環(huán)前后干了什么,都是要走這一步,讀取數(shù)據(jù)幀。從開始,我理解的是計(jì)算出當(dāng)前數(shù)據(jù)幀的時(shí)間戳后再計(jì)算出播放的起始時(shí)間到當(dāng)前時(shí)間,然后看這個(gè)時(shí)間戳是否在此范圍內(nèi)。 ijkplayer現(xiàn)在比較流行,因?yàn)楣ぷ麝P(guān)系,接觸了他,現(xiàn)在做個(gè)簡(jiǎn)單的分析記錄吧。我這里直接跳過java層代碼,進(jìn)入c層,因?yàn)榇蠖鄶?shù)的工作都是通過jni調(diào)用到c層來完成的,java層的內(nèi)容并不是主...
摘要:下面是,讀取頭信息頭信息。猜測(cè)網(wǎng)絡(luò)部分至少在一開始就應(yīng)當(dāng)初始化好的,因此在的過程里面找,在中找到了。就先暫時(shí)分析到此吧。 這章要簡(jiǎn)單分析下ijkplayer是如何從文件或網(wǎng)絡(luò)讀取數(shù)據(jù)源的。還是read_thread函數(shù)中的關(guān)鍵點(diǎn)avformat_open_input函數(shù): int avformat_open_input(AVFormatContext **ps, const char ...
摘要:初始化的過程上一篇其實(shí)并未完全分析完,這回接著來。層的函數(shù)中,最后還有的調(diào)用,走的是層的。結(jié)構(gòu)體如下的和,以及,其余是狀態(tài)及的內(nèi)容。整個(gè)過程是個(gè)異步的過程,并不阻塞。至于的東西,都是在層創(chuàng)建并填充的。 初始化的過程上一篇其實(shí)并未完全分析完,這回接著來。java層的initPlayer函數(shù)中,最后還有native_setup的調(diào)用,走的是c層的IjkMediaPlayer_native_...
閱讀 1320·2021-09-27 13:56
閱讀 2353·2019-08-26 10:35
閱讀 3513·2019-08-23 15:53
閱讀 1862·2019-08-23 14:42
閱讀 1244·2019-08-23 14:33
閱讀 3575·2019-08-23 12:36
閱讀 1961·2019-08-22 18:46
閱讀 1010·2019-08-22 14:06