How ffplay pulls HTTP data through URLProtocol [source code walkthrough]

Background

FFmpeg needs little introduction to anyone doing audio/video development: it is the widely adopted open-source codec library, and many players rely on it for encoding and decoding. For an Android developer, ffplay, the player that ships with FFmpeg, is a good way to study how FFmpeg pulls multimedia data, decodes it, and keeps audio and video in sync.
This article looks at how ffplay fetches data before demuxing, from the perspective of URLProtocol.

stream_open : called from ffplay::main; it kicks off the chain that, based on filename, decides which protocol is used to pull data and creates the URLProtocol and related structures
decode : e.g. turning H.264 data into YUV, or AAC audio data into PCM
demux (demultiplexing) : performed before decoding; splits the muxed multimedia data into separate video, audio and subtitle streams. Each container format has its own demuxing procedure

Essential background

Before diving into the source analysis, make sure the following concepts are clear (experts may skip ahead):

  1. ffplay: the player implemented inside FFmpeg itself. It pulls streams through FFmpeg's own tcp protocol implementation (the transport is not necessarily tcp; it depends on which URLProtocol is matched), then demuxes and decodes the data and performs A/V synchronization.
  2. SDL_CreateThread: creates a thread through the SDL library; it takes a function pointer, and that function is executed on the newly created thread.
  3. SDL_CondWaitTimeout: waits on an SDL condition variable for at most a given amount of time. A minimal usage sketch of these two SDL calls follows this list.
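
To make items 2 and 3 concrete, here is a minimal stand-alone sketch (not ffplay's actual code) of the two SDL calls; the worker function and the example URL are made up for illustration:

#define SDL_MAIN_HANDLED        /* avoid SDL redefining main() in this sketch */
#include <SDL2/SDL.h>

/* Hypothetical worker; ffplay's real read_thread() pulls packets and queues them. */
static int read_thread_fn(void *arg)
{
    const char *url = arg;
    SDL_Log("read thread started for %s", url);
    /* ... read data, demux, push packets into queues ... */
    return 0;
}

int main(void)
{
    SDL_mutex *mutex = SDL_CreateMutex();
    SDL_cond  *cond  = SDL_CreateCond();

    /* SDL_CreateThread runs the given function pointer on a newly created thread. */
    SDL_Thread *tid = SDL_CreateThread(read_thread_fn, "read_thread",
                                       (void *)"http://www.example.com/a.mp4");

    /* Wait at most 10 ms for cond to be signalled, then continue; ffplay uses the
     * same pattern to let the read loop sleep while its packet queues are full. */
    SDL_LockMutex(mutex);
    SDL_CondWaitTimeout(cond, mutex, 10);
    SDL_UnlockMutex(mutex);

    SDL_WaitThread(tid, NULL);
    SDL_DestroyCond(cond);
    SDL_DestroyMutex(mutex);
    return 0;
}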

Function call flow chart

(call-flow diagram image)

ffurl_alloc

In the call flow above, URLProtocol and URLContext initialization starts in ffurl_alloc, shown below:

int ffurl_alloc(URLContext **puc, const char *filename, int flags,
                const AVIOInterruptCB *int_cb)
{
    const URLProtocol *p = NULL;
    // p -> ff_http_protocol
    p = url_find_protocol(filename);
    // initialize the URLContext and store the matched protocol in it
    if (p)
       return url_alloc_for_protocol(puc, p, filename, flags, int_cb);
    // reaching this point means no protocol matched, so no URLContext is created
    *puc = NULL;
    if (av_strstart(filename, "https:", NULL))
        av_log(NULL, AV_LOG_WARNING, "https protocol not found, recompile FFmpeg with "
                                     "openssl, gnutls "
                                     "or securetransport enabled.\n");
    return AVERROR_PROTOCOL_NOT_FOUND;
}
  1. url_find_protocol: probes the protocol from the input file name or URL; for a filename starting with http, the result is ff_http_protocol
  2. url_alloc_for_protocol: initializes the URLContext and stores the URLProtocol returned by the previous step in it

The matched ff_http_protocol constant looks like this (excerpt):

const URLProtocol ff_http_protocol = {
  .name = "http",
  .url_open2 = http_open,
  .url_read = http_read,
  .url_write = http_write,
  .priv_data_size = sizeof(HTTPContext),
  /* ... other callbacks such as url_seek/url_close omitted ... */
};
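
ff_http_protocol is essentially a vtable: once url_find_protocol has picked it, the generic ffurl_* layer only talks to its function pointers. Below is a rough sketch of that dispatch, not the real ffurl_open/ffurl_read bodies (those live in libavformat/avio.c); it assumes the FFmpeg-internal header libavformat/url.h for the URLContext/URLProtocol definitions, and demo_open/demo_read are made-up names:

#include <errno.h>
#include "url.h"   /* FFmpeg-internal header, only available inside the source tree */

static int demo_open(URLContext *uc, const char *url, int flags, AVDictionary **opts)
{
    const URLProtocol *p = uc->prot;                 /* e.g. ff_http_protocol */
    if (p->url_open2)
        return p->url_open2(uc, url, flags, opts);   /* http -> http_open() */
    if (p->url_open)
        return p->url_open(uc, url, flags);
    return AVERROR(ENOSYS);
}

static int demo_read(URLContext *uc, unsigned char *buf, int size)
{
    return uc->prot->url_read(uc, buf, size);        /* http -> http_read() */
}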

url_find_protocol

url_find_protocol probes based on filename; if a match is found, it returns the corresponding URLProtocol. FFmpeg keeps every supported protocol as a URLProtocol constant listed in protocol_list.c.

static const struct URLProtocol *url_find_protocol(const char *filename)
{
    const URLProtocol **protocols;
    char proto_str[128], proto_nested[128], *ptr;
    size_t proto_len = strspn(filename, URL_SCHEME_CHARS);
    int i;
    //filename == http://xxxx, proto_len = 4
    if (filename[proto_len] != ':' &&
        (strncmp(filename, "subfile,", 8) || !strchr(filename + proto_len + 1, ':')) ||
        is_dos_path(filename))
        strcpy(proto_str, "file");
    else
        av_strlcpy(proto_str, filename,
                   FFMIN(proto_len + 1, sizeof(proto_str)));      //sizeof(proto_str) == 128, so proto_str == 'http'
                   

    if ((ptr = strchr(proto_str, ',')))
        *ptr = '\0';
    //proto_nested -> 'http'
    av_strlcpy(proto_nested, proto_str, sizeof(proto_nested));
    
    if ((ptr = strchr(proto_nested, '+')))
        *ptr = '\0';
    
    // collect the url_protocols constants into an array: url_protocols -> [ff_http_protocol, ...]
    protocols = ffurl_get_protocols(NULL, NULL);
    // allocation failed, return directly
    if (!protocols)
        return NULL;
    
    for (i = 0; protocols[i]; i++) {
        const URLProtocol *up = protocols[i];
        // match the URLProtocol by its name member
        if (!strcmp(proto_str, up->name)) {
            av_freep(&protocols);
            return up;
        }
        if (up->flags & URL_PROTOCOL_FLAG_NESTED_SCHEME &&
            !strcmp(proto_nested, up->name)) {
            av_freep(&protocols);
            return up;
        }
    }
    av_freep(&protocols);

    return NULL;
}
  1. protocol_list.c lists every protocol FFmpeg supports, e.g. http, rtmp, udp, file. These constants are stored as URLProtocol structures in the url_protocols array, and url_find_protocol's job is to match the filename to one specific URLProtocol instance.
  2. First, url_find_protocol measures the length of the scheme in filename with strspn(filename, URL_SCHEME_CHARS): proto_len is the index of the first character that is not in the scheme character set (letters, digits, '+', '-', '.'). For http://www.badu.com/xxx.mp4 that character is ':' at index 4, so proto_len = 4. A stand-alone sketch of this follows the list.
  3. The scheme string (e.g. http) is then copied out of filename according to proto_len and compared with the name member of each URLProtocol such as ff_http_protocol; a match means that URLProtocol is returned.
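
The scheme extraction described in step 2 can be reproduced in isolation. A minimal sketch follows; URL_SCHEME_CHARS is copied from libavformat/avio.c, the rest is illustrative and only mirrors the happy path (no subfile/DOS-path handling):

#include <stdio.h>
#include <string.h>

/* The character set libavformat uses to delimit a URL scheme. */
#define URL_SCHEME_CHARS \
    "abcdefghijklmnopqrstuvwxyz" \
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
    "0123456789+-."

int main(void)
{
    const char *filename = "http://www.badu.com/xxx.mp4";
    char proto_str[128];

    /* strspn() returns the length of the initial run of scheme characters;
     * for "http://..." the first non-scheme character is ':' at index 4. */
    size_t proto_len = strspn(filename, URL_SCHEME_CHARS);

    if (filename[proto_len] != ':') {
        strcpy(proto_str, "file");            /* no scheme -> treated as a local file */
    } else {
        size_t n = proto_len < sizeof(proto_str) - 1 ? proto_len : sizeof(proto_str) - 1;
        memcpy(proto_str, filename, n);
        proto_str[n] = '\0';
    }
    printf("proto_len=%zu proto_str=%s\n", proto_len, proto_str);   /* 4, "http" */
    return 0;
}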

http_open_cnx_internal

Reached from http_open_cnx, http_open_cnx_internal mainly creates the lower-level protocol and then completes the handshake with the server by calling http_connect (the request is sent through that lower protocol).

static int http_open_cnx_internal(URLContext *h, AVDictionary **options)
{
    // the lower-level protocol defaults to tcp
    const char *path, *proxy_path, *lower_proto = "tcp", *local_path;
    
    char hostname[1024], hoststr[1024], proto[10];
    char auth[1024], proxyauth[1024] = "";
    char path1[MAX_URL_SIZE];
    char buf[1024], urlbuf[MAX_URL_SIZE];
    int port, use_proxy, err, location_changed = 0;
    
    HTTPContext *s = h->priv_data;
    // av_url_split: split s->location into proto / auth / hostname / port / path
    av_url_split(proto, sizeof(proto), auth, sizeof(auth),
                 hostname, sizeof(hostname), &port,
                 path1, sizeof(path1), s->location);
    ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL);


    if (!strcmp(proto, "https")) {
        lower_proto = "tls";
        use_proxy   = 0;
        if (port < 0)
            port = 443;
    }
    if (port < 0)
        port = 80;

    if (path1[0] == '\0')
        path = "/";
    else
        path = path1;
    local_path = path;
    ....
    
    // assemble the lower-protocol URL string (buf) -> tcp://{hostname}:{port}
    ff_url_join(buf, sizeof(buf), lower_proto, NULL, hostname, port, NULL);
    //s: HTTPContext, s->hd: URLContext
    if (!s->hd) {
        // open the lower protocol; this matches ff_tcp_protocol
        err = ffurl_open_whitelist(&s->hd, buf, AVIO_FLAG_READ_WRITE,
                                   &h->interrupt_callback, options,
                                   h->protocol_whitelist, h->protocol_blacklist, h);
        if (err < 0)
            return err;
    }
    // perform the HTTP connection: send the request and read the response headers
    err = http_connect(h, path, local_path, hoststr,
                       auth, proxyauth, &location_changed);
    if (err < 0)
        return err;

    return location_changed;
}
  1. The lower protocol defaults to tcp; if the scheme is https it is switched to tls.
  2. ffurl_open_whitelist is called to create the URLProtocol/URLContext for tcp (ff_tcp_protocol).
  3. The request is then sent to the server through that lower protocol. A small sketch of the lower-protocol URL built in this step follows this list.
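
The lower-protocol URL assembled in this step is just a string. The following stand-alone sketch shows roughly what ff_url_join produces for an http input, using plain snprintf instead of the real ff_url_join and a made-up host and port:

#include <stdio.h>

int main(void)
{
    /* Values av_url_split() would have extracted from s->location;
     * the host and port here are made up for illustration. */
    const char *lower_proto = "tcp";             /* "tls" if the scheme was https */
    const char *hostname    = "www.example.com";
    int         port        = 80;
    char buf[1024];

    /* Rough equivalent of ff_url_join(buf, sizeof(buf), lower_proto, NULL, hostname, port, NULL) */
    snprintf(buf, sizeof(buf), "%s://%s:%d", lower_proto, hostname, port);
    printf("%s\n", buf);                         /* tcp://www.example.com:80 */
    return 0;
}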

http_connect

This function's main job is to talk to the server through the lower protocol: it writes the request header and reads the server's reply into the HTTPContext. For example, when filename is http://xxxx:port, the URLProtocol is ff_http_protocol, and the URLContext's priv_data (an HTTPContext) holds a nested URLContext (s->hd) whose URLProtocol is ff_tcp_protocol. This nesting is why tcp is regarded as the lower protocol in FFmpeg.

static int http_connect(URLContext *h, const char *path, const char *local_path,
                        const char *hoststr, const char *auth,
                        const char *proxyauth, int *new_location)
{
    HTTPContext *s = h->priv_data;
    int post, err;
    char headers[HTTP_HEADERS_SIZE] = "";
    char *authstr = NULL, *proxyauthstr = NULL;
    uint64_t off = s->off;
    int len = 0;
    const char *method;
    int send_expect_100 = 0;
    int ret;

    /* send http header */
    // when the AVFormatContext is first opened, the flags are AVIO_FLAG_READ, so post == 0
    post = h->flags & AVIO_FLAG_WRITE;

    if (s->post_data) {
        /* force POST method and disable chunked encoding when
         * custom HTTP post data is set */
        post            = 1;
        s->chunked_post = 0;
    }

    //method -> "GET"
    if (s->method)
        method = s->method;
    else
        method = post ? "POST" : "GET";
	....
    
    /* set default headers if needed */
    if (!has_header(s->headers, "\r\nUser-Agent: "))
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "User-Agent: %s\r\n", s->user_agent);
    if (!has_header(s->headers, "\r\nAccept: "))
        len += av_strlcpy(headers + len, "Accept: */*\r\n",
                          sizeof(headers) - len);
    // Note: we send this on purpose even when s->off is 0 when we're probing,
    // since it allows us to detect more reliably if a (non-conforming)
    // server supports seeking by analysing the reply headers.
    if (!has_header(s->headers, "\r\nRange: ") && !post && (s->off > 0 || s->end_off || s->seekable == -1)) {
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Range: bytes=%"PRIu64"-", s->off);
        if (s->end_off)
            len += av_strlcatf(headers + len, sizeof(headers) - len,
                               "%"PRId64, s->end_off - 1);
        len += av_strlcpy(headers + len, "\r\n",
                          sizeof(headers) - len);
    }
    if (send_expect_100 && !has_header(s->headers, "\r\nExpect: "))
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Expect: 100-continue\r\n");

    if (!has_header(s->headers, "\r\nConnection: ")) {
        if (s->multiple_requests)
            len += av_strlcpy(headers + len, "Connection: keep-alive\r\n",
                              sizeof(headers) - len);
        else
            len += av_strlcpy(headers + len, "Connection: close\r\n",
                              sizeof(headers) - len);
    }

    if (!has_header(s->headers, "\r\nHost: "))
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Host: %s\r\n", hoststr);
    if (!has_header(s->headers, "\r\nContent-Length: ") && s->post_data)
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Content-Length: %d\r\n", s->post_datalen);

    if (!has_header(s->headers, "\r\nContent-Type: ") && s->content_type)
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Content-Type: %s\r\n", s->content_type);
    if (!has_header(s->headers, "\r\nCookie: ") && s->cookies) {
        char *cookies = NULL;
        if (!get_cookies(s, &cookies, path, hoststr) && cookies) {
            len += av_strlcatf(headers + len, sizeof(headers) - len,
                               "Cookie: %s\r\n", cookies);
            av_free(cookies);
        }
    }
    if (!has_header(s->headers, "\r\nIcy-MetaData: ") && s->icy)
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Icy-MetaData: %d\r\n", 1);

    /* now add in custom headers */
    if (s->headers)
        av_strlcpy(headers + len, s->headers, sizeof(headers) - len);

    ret = snprintf(s->buffer, sizeof(s->buffer),
             "%s %s HTTP/1.1\r\n"
             "%s"
             "%s"
             "%s"
             "%s%s"
             "\r\n",
             method,
             path,
             post && s->chunked_post ? "Transfer-Encoding: chunked\r\n" : "",
             headers,
             authstr ? authstr : "",
             proxyauthstr ? "Proxy-" : "", proxyauthstr ? proxyauthstr : "");

    av_log(h, AV_LOG_DEBUG, "request: %s\n", s->buffer);

    if (strlen(headers) + 1 == sizeof(headers) ||
        ret >= sizeof(s->buffer)) {
        av_log(h, AV_LOG_ERROR, "overlong headers\n");
        err = AVERROR(EINVAL);
        goto done;
    }

    // write the request header through the lower protocol
    if ((err = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0)
        goto done;

    if (s->post_data)
        if ((err = ffurl_write(s->hd, s->post_data, s->post_datalen)) < 0)
            goto done;

    /* init input buffer */
    s->buf_ptr          = s->buffer;
    s->buf_end          = s->buffer;
    s->line_count       = 0;
    s->off              = 0;
    s->icy_data_read    = 0;
    s->filesize         = UINT64_MAX;
    s->willclose        = 0;
    s->end_chunked_post = 0;
    s->end_header       = 0;
    if (post && !s->post_data && !send_expect_100) {
        /* Pretend that it did work. We didn't read any header yet, since
         * we've still to send the POST data, but the code calling this
         * function will check http_code after we return. */
        s->http_code = 200;
        err = 0;
        goto done;
    }

    /* wait for header */
    // read the response headers
    err = http_read_header(h, new_location);
    if (err < 0)
        goto done;

    if (*new_location)
        s->off = off;

    err = (off == s->off) ? 0 : -1;
done:
    av_freep(&authstr);
    av_freep(&proxyauthstr);
    return err;
}
  1. ffurl_write: writes the request header through the lower protocol (e.g. tcp)
  2. http_read_header: reads the headers returned by the server and stores them in the HTTPContext. An example of the request text sent in step 1 is shown below.
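
Putting the two steps together: for a plain GET with the default headers built above, the request text handed to ffurl_write looks roughly like the following (the path, host and Lavf version are placeholders; every line ends with \r\n, and the empty line terminates the header block):

GET /xxx.mp4 HTTP/1.1
User-Agent: Lavf/<version>
Accept: */*
Range: bytes=0-
Connection: close
Host: www.example.com
Icy-MetaData: 1
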

Probing the format

Once the URLProtocol has been created from the URL and the HTTPContext has been filled from the server's response headers, the next step is to work out which container format the media uses, i.e. which demuxer should handle it.

/* Open input file and probe the format if necessary. */
static int init_input(AVFormatContext *s, const char *filename,
                      AVDictionary **options)
{
    int ret;
    AVProbeData pd = { filename, NULL, 0 };
    int score = AVPROBE_SCORE_RETRY;
    
    // s->pb is usually NULL here, so this branch is skipped
    if (s->pb) {
        s->flags |= AVFMT_FLAG_CUSTOM_IO;
        if (!s->iformat)
            return av_probe_input_buffer2(s->pb, &s->iformat, filename,
                                         s, 0, s->format_probesize);
        else if (s->iformat->flags & AVFMT_NOFILE)
            av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
                                      "will be ignored with AVFMT_NOFILE format.\n");
        return 0;
    }
    
    // handles the case where iformat has already been set by the caller
    if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
        return score;

    //call avio_open2, s->pb : AVIOContext
    if ((ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ | s->avio_flags, options)) < 0)
        return ret;

    return av_probe_input_buffer2(s->pb, &s->iformat, filename,
                                 s, 0, s->format_probesize);
}
  1. At this point the AVInputFormat is still unknown, so av_probe_input_buffer2 is called to probe the format of the media.

av_probe_input_buffer2

This function reads data and matches it against a suitable demuxer; for AAC audio, for example, the match is ff_aac_demuxer.

int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
{
    AVProbeData pd = { filename ? filename : "" };
    uint8_t *buf = NULL;
    int ret = 0, probe_size, buf_offset = 0;
    int score = 0;
    int ret2;
    
    //s->format_probesize = 0, PROBE_BUF_MAX == 1<<20
    if (!max_probe_size)
      max_probe_size = PROBE_BUF_MAX;


    // the mime_type option is looked up in pb's children (the underlying URLContext/HTTPContext)
    if (pb->av_class) {
        uint8_t *mime_type_opt = NULL;
        char *semi;
        // obtained earlier while reading the HTTP response headers
        av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type_opt);
        pd.mime_type = (const char *)mime_type_opt;
        semi = pd.mime_type ? strchr(pd.mime_type, ';') : NULL;
        if (semi) {
            *semi = '\0';
        }
    }
    //PROBE_BUF_MIN -> 2048
    // the probe size starts at 2048 and doubles on each pass
    for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt;
        //每次循環都會增加1倍的probe_size
        probe_size = FFMIN(probe_size << 1,
                            FFMAX(max_probe_size, probe_size + 1))) {
                              
        //AVPROBE_SCORE_RETRY = 25
        score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;

        /* Read probe data. */
        // allocate (or grow) the probe buffer
        if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
            goto fail;
            
        // read multimedia data
        // 1. data was read: ret > 0
        // 2. nothing was read: take the if branch below
        if ((ret = avio_read(pb, buf + buf_offset,
                             probe_size - buf_offset)) < 0) {
            /* Fail if error was not end of file, otherwise, lower score. */
            if (ret != AVERROR_EOF)
                goto fail;

            score = 0;
            ret   = 0;          /* error was end of file, nothing read */
        }
        
        // buf_offset starts at 0
        buf_offset += ret;
        if (buf_offset < offset)
            continue;
        pd.buf_size = buf_offset - offset;
        pd.buf = &buf[offset];
        
        // zero the extra allocated padding bytes at the end of the probe data
        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
      
        /* Guess file format. */
        *fmt = av_probe_input_format2(&pd, 1, &score);
        if (*fmt) {
            /* This can only be true in the last iteration. */
            if (score <= AVPROBE_SCORE_RETRY) {
                av_log(logctx, AV_LOG_WARNING,
                       "Format %s detected only with low score of %d, "
                       "misdetection possible!\n", (*fmt)->name, score);
            } else
                av_log(logctx, AV_LOG_DEBUG,
                       "Format %s probed with size=%d and score=%d\n",
                       (*fmt)->name, probe_size, score);
#if 0
            FILE *f = fopen("probestat.tmp", "ab");
            fprintf(f, "probe_size:%d format:%s score:%d filename:%s\n", probe_size, (*fmt)->name, score, filename);
            fclose(f);
#endif
        }
    }

    if (!*fmt)
        ret = AVERROR_INVALIDDATA;

fail:
    /* Rewind. Reuse probe buffer to avoid seeking. */
    ret2 = ffio_rewind_with_probe_data(pb, &buf, buf_offset);
    if (ret >= 0)
        ret = ret2;

    av_freep(&pd.mime_type);
    return ret < 0 ? ret : score;
}
  1. No probe size was specified when the AVFormatContext was opened, so max_probe_size falls back to PROBE_BUF_MAX (1<<20).
  2. An AVProbeData structure is created and the mime_type obtained earlier through the URLProtocol is copied into it.
  3. Probing runs in a loop; the amount of data examined grows on each pass, and the loop exits once a demuxer (AVInputFormat) has been matched.
  4. One probe pass: a) read data over tcp, up to max_probe_size (1<<20); b) if a read fails with anything other than end-of-file, probing fails and the error path is taken; c) after reading, av_probe_input_format2 (which in turn calls av_probe_input_format3) guesses the format and assigns it a score. A tiny sketch of how probe_size grows follows this list.
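
The growth of probe_size in step 3 is easy to see in isolation. Below is a tiny sketch that reuses the loop header from av_probe_input_buffer2 with the same constants (PROBE_BUF_MIN = 2048, PROBE_BUF_MAX = 1<<20); unlike the real loop it never exits early, because there is no format matching here:

#include <stdio.h>

#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1 << 20)
#define FFMIN(a, b) ((a) < (b) ? (a) : (b))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    int max_probe_size = PROBE_BUF_MAX;

    /* Same progression as av_probe_input_buffer2: 2048, 4096, ... up to 1 MiB. */
    for (int probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size;
         probe_size = FFMIN(probe_size << 1,
                            FFMAX(max_probe_size, probe_size + 1)))
        printf("probe_size=%d\n", probe_size);

    return 0;
}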

avio_read

FFmpeg's generic read function; data pulled from the lower protocol is first stored in AVIOContext->buffer and then copied out to the caller.

/**
 * size -> probe_size; on the first probe pass this is 2048
 **/
int avio_read(AVIOContext *s, unsigned char *buf, int size)
{
    int len, size1;
    size1 = size;
    while (size > 0) {
        len = FFMIN(s->buf_end - s->buf_ptr, size);
        //s-> write_flag = 0
        if (len == 0 || s->write_flag) {
            //s->update_checksum = null
            //s->buffer_size = IO_BUFFER_SIZE     //32768
            if((s->direct || size > s->buffer_size) && !s->update_checksum) {
                // bypass the buffer and read data directly into buf
                if(s->read_packet)
                    len = s->read_packet(s->opaque, buf, size);

                if (len <= 0) {
                    /* do not modify buffer if EOF reached so that a seek back can
                    be done without rereading data */
                    s->eof_reached = 1;
                    if(len<0)
                        s->error= len;
                    break;
                } else {
                    s->pos += len;
                    s->bytes_read += len;
                    size -= len;
                    buf += len;
                    // reset the buffer
                    s->buf_ptr = s->buffer;
                    s->buf_end = s->buffer/* + len*/;
                }
            } else {
                //the first pass takes this branch
                //fill_buffer() performs one read over tcp; afterwards len != 0 if data arrived
                fill_buffer(s);
                len = s->buf_end - s->buf_ptr;
                //nothing left to read: stop the loop
                if (len == 0)
                    break;
            }
        } else {
            //subsequent iterations take this branch
            //copy the contents at s->buf_ptr into the caller's buf
            memcpy(buf, s->buf_ptr, len);
            buf += len;
            s->buf_ptr += len;
            //subtract the number of bytes already copied from size
            size -= len;
        }
    }
    ....
    return size1 - size;
}
  1. The first read goes through fill_buffer; for an HTTP source this reads from the underlying tcp protocol and stores the data in AVIOContext->buffer.
  2. On the next loop iteration the contents of AVIOContext->buffer are copied into the destination buf. A simplified stand-alone version of this buffered-read pattern is sketched below.
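
The two-phase behaviour above (refill the internal buffer, then copy out) is the classic buffered-reader pattern. Here is a simplified stand-alone sketch of it; ToyIO, toy_fill_buffer and the fake read callback are made up, with the fake source standing in for the tcp read:

#include <stdio.h>
#include <string.h>

#define IO_BUFFER_SIZE 32768               /* same default buffer size as AVIOContext */

/* Toy reader state mirroring the AVIOContext fields avio_read() uses. */
typedef struct {
    unsigned char buffer[IO_BUFFER_SIZE];
    unsigned char *buf_ptr, *buf_end;
    int (*read_packet)(void *opaque, unsigned char *buf, int size);
    void *opaque;
} ToyIO;

/* Rough equivalent of fill_buffer(): one underlying read into the internal buffer. */
static void toy_fill_buffer(ToyIO *s)
{
    int len = s->read_packet(s->opaque, s->buffer, IO_BUFFER_SIZE);
    if (len < 0)
        len = 0;                            /* treat errors/EOF as "nothing read" */
    s->buf_ptr = s->buffer;
    s->buf_end = s->buffer + len;
}

/* Simplified avio_read(): refill when the buffer is drained, otherwise memcpy out. */
static int toy_read(ToyIO *s, unsigned char *buf, int size)
{
    int size1 = size;
    while (size > 0) {
        int len = (int)(s->buf_end - s->buf_ptr);
        if (len == 0) {
            toy_fill_buffer(s);
            if (s->buf_end == s->buf_ptr)
                break;                      /* EOF: nothing more to copy */
        } else {
            len = len < size ? len : size;
            memcpy(buf, s->buf_ptr, len);
            buf        += len;
            s->buf_ptr += len;
            size       -= len;
        }
    }
    return size1 - size;
}

/* Fake "network" source: serves 100000 bytes of 'A', then EOF. */
static int fake_read(void *opaque, unsigned char *buf, int size)
{
    int *left = opaque;
    int n = *left < size ? *left : size;
    memset(buf, 'A', n);
    *left -= n;
    return n > 0 ? n : -1;                  /* <= 0 signals EOF, like url_read */
}

int main(void)
{
    int left = 100000;
    unsigned char out[2048];
    ToyIO io = { .read_packet = fake_read, .opaque = &left };
    io.buf_ptr = io.buf_end = io.buffer;    /* empty buffer forces the first refill */
    printf("read %d bytes\n", toy_read(&io, out, sizeof(out)));
    return 0;
}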

av_probe_input_format3

av_probe_input_format3 does the actual format guessing.

AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened,
                                      int *score_ret)
{
    AVProbeData lpd = *pd;
    AVInputFormat *fmt1 = NULL, *fmt;
    int score, score_max = 0;
    const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
    enum nodat {
        NO_ID3,
        ID3_ALMOST_GREATER_PROBE,
        ID3_GREATER_PROBE,
        ID3_GREATER_MAX_PROBE,
    } nodat = NO_ID3;

    if (!lpd.buf)
        lpd.buf = (unsigned char *) zerobuffer;
    //if an ID3v2 tag is present (typically in mp3 files), advance buf past the tag to the start of the actual data
    if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
        int id3len = ff_id3v2_tag_len(lpd.buf);
        if (lpd.buf_size > id3len + 16) {
            if (lpd.buf_size < 2LL*id3len + 16)
                nodat = ID3_ALMOST_GREATER_PROBE;
            lpd.buf      += id3len;
            lpd.buf_size -= id3len;
        } else if (id3len >= PROBE_BUF_MAX) {
            nodat = ID3_GREATER_MAX_PROBE;
        } else
            nodat = ID3_GREATER_PROBE;
    }

    fmt = NULL;
    
    //0. fmt1 starts as NULL
    //1. ffplay::main -> av_register_all()
    //2. REGISTER_DEMUXER(AAC, aac); registers a demuxer, adding a new AVInputFormat (ff_aac_demuxer)

    //iterate over the AVInputFormat list, e.g. ff_mp3_demuxer
    while ((fmt1 = av_iformat_next(fmt1))) {
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE) && strcmp(fmt1->name, "image2"))
            continue;
        score = 0;
        
        if (fmt1->read_probe) {
            //mp3 -> mp3_read_probe
            score = fmt1->read_probe(&lpd);
            if (score)
                av_log(NULL, AV_LOG_TRACE, "Probing %s score:%d size:%d\n", fmt1->name, score, lpd.buf_size);
            if (fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions)) {
                switch (nodat) {
                case NO_ID3:
                    score = FFMAX(score, 1);
                    break;
                case ID3_GREATER_PROBE:
                case ID3_ALMOST_GREATER_PROBE:
                    score = FFMAX(score, AVPROBE_SCORE_EXTENSION / 2 - 1);
                    break;
                case ID3_GREATER_MAX_PROBE:
                    score = FFMAX(score, AVPROBE_SCORE_EXTENSION);
                    break;
                }
            }
        } else if (fmt1->extensions) {  //if the demuxer declares its own file extensions, e.g. ff_mp3_demuxer->extensions == "mp2,mp3,m2a,mpa"
            if (av_match_ext(lpd.filename, fmt1->extensions))
                score = AVPROBE_SCORE_EXTENSION;
        }
        if (av_match_name(lpd.mime_type, fmt1->mime_type)) {
            if (AVPROBE_SCORE_MIME > score) {
                av_log(NULL, AV_LOG_DEBUG, "Probing %s score:%d increased to %d due to MIME type\n", fmt1->name, score, AVPROBE_SCORE_MIME);
                score = AVPROBE_SCORE_MIME;
            }
        }
        if (score > score_max) {
            score_max = score;
            fmt       = fmt1;
        } else if (score == score_max)
            fmt = NULL;
    }
    if (nodat == ID3_GREATER_PROBE)
        score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
    *score_ret = score_max;

    return fmt;
}
  1. First the AVProbeData is checked for ID3 information. ID3 tags usually appear in mp3 audio and carry metadata such as the album name.
  2. av_iformat_next is used to walk the list of registered demuxers (av_register_all, called in ffplay's main, registers all demuxers and codecs). In each iteration the demuxer's read_probe is called to produce a score, e.g. mp3_read_probe for mp3 audio, and the demuxer with the highest score is selected. A toy read_probe is sketched below.
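
To make the read_probe step concrete, here is what such a callback looks like in spirit: a hypothetical probe for a made-up "XYZ1" container (a real one, such as mp3_read_probe, parses frame headers instead of a single magic string). The ToyProbeData struct and xyz_read_probe are illustrative; the score constants match FFmpeg's AVPROBE_SCORE_MAX (100) and AVPROBE_SCORE_EXTENSION (50):

#include <stdio.h>
#include <string.h>

#define AVPROBE_SCORE_MAX       100
#define AVPROBE_SCORE_EXTENSION  50

/* Minimal stand-in for AVProbeData: just the fields a probe usually touches. */
typedef struct {
    const char *filename;
    unsigned char *buf;
    int buf_size;
} ToyProbeData;

/* Hypothetical read_probe for a made-up "XYZ1" container. */
static int xyz_read_probe(const ToyProbeData *p)
{
    if (p->buf_size >= 4 && !memcmp(p->buf, "XYZ1", 4))
        return AVPROBE_SCORE_MAX;               /* magic matched: certain */
    if (p->filename && strstr(p->filename, ".xyz"))
        return AVPROBE_SCORE_EXTENSION / 2;     /* weak hint from the extension only */
    return 0;                                   /* not our format */
}

int main(void)
{
    unsigned char data[] = "XYZ1 some payload";
    ToyProbeData pd = { "clip.xyz", data, (int)sizeof(data) - 1 };
    printf("score=%d\n", xyz_read_probe(&pd));  /* 100 */
    return 0;
}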