ffmpeg裏讀取文件是最基礎的操作,但是在很多行業裏面,讀取文件的操作實際意義不大,還是讀取內存的操作作用大一些。兩者之間的主要差別在於avformat_open_input的參數中的AVFormatContext->pb:只要這個pb用avio_alloc_context初始化過了,就可以從內存裏面讀取了。但是因爲裏面的函數是回調函數,而且讀取的次數又很多,所以使用過程中還需要注意。
#include <stdio.h>
#include <stdlib.h>   /* malloc/free -- previously relied on transitive includes */
#include <string.h>   /* memcpy */
#include <iostream>
using namespace std;
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
};
// One chunk of the input bitstream held in memory; consumed by
// AVParse::read_buffer, refilled by main().
typedef struct
{
char *pbuff;        // backing buffer holding the chunk's bytes
int ulSize;         // number of valid bytes in pbuff
int ulSizeOffset;   // current read position; advanced by read_buffer
}stData;
int a =1; // NOTE(review): appears unused in this file -- candidate for removal; verify no external reference
class AVParse
{
public:
AVParse()
{
m_buff = (unsigned char *)malloc(50000);
m_bufferSize = 50000;
m_Data = (stData *)malloc(sizeof(stData));
m_Data->pbuff = (char *)malloc(50000);
m_Data->ulSize = 50000;
m_Data->ulSizeOffset = 0;
pavPacket = (AVPacket *)av_malloc(sizeof(AVPacket));
videocount = 1;
lVidepId = -1;
lAudioId = -1;
pOutputFile = NULL;
bIsInit = false;
}
~AVParse()
{
if (NULL != m_buff)
{
free(m_buff);
m_buff = NULL;
}
m_bufferSize = 0;
if (NULL != m_Data->pbuff)
{
free(m_Data->pbuff);
m_Data->pbuff = NULL;
}
}
static int read_buffer(void *opaque, uint8_t *buf, int buf_size)
{
stData *pdata = (stData *)opaque;
int cpysize = pdata->ulSizeOffset + buf_size < pdata->ulSize ? buf_size:(pdata->ulSize - pdata->ulSizeOffset);
if (pdata->ulSizeOffset < pdata->ulSize)
{
memcpy(buf, pdata->pbuff + pdata->ulSizeOffset, cpysize);
pdata->ulSizeOffset += cpysize;
return cpysize;
}
else
{
return 0;
}
}
static int wrete_buffer(void *opaque, uint8_t *buf, int buf_size)
{
return 0;
}
void InitOutputAudio()
{
avformat_alloc_output_context2(&m_pavformatOutput, NULL, "mp3", NULL);
}
void InitParse()
{
av_register_all();
m_pavFormatContext = avformat_alloc_context();
m_pavFormatContext->pb = avio_alloc_context(m_buff, m_bufferSize, 0, (void *)m_Data, read_buffer, NULL, NULL);
int ulRet = avformat_open_input(&m_pavFormatContext , NULL ,NULL ,NULL);
if(ulRet < 0)
{
cout << "avformat_open_input failed!!!" <<endl;
//return ulRet;
}
//else
//{
// return 0;
//}
for (int i =0 ;i< m_pavFormatContext->nb_streams; i++)
{
if (AVMEDIA_TYPE_VIDEO == m_pavFormatContext->streams[i]->codec->codec_type)
{
lVidepId = i;
if (AV_CODEC_ID_H264 == m_pavFormatContext->streams[i]->codec->codec_id)
{
cout << "code id is AV_CODEC_ID_H264!!!"<< endl;
}
}
if (AVMEDIA_TYPE_AUDIO == m_pavFormatContext->streams[i]->codec->codec_type)
{
lAudioId = i;
if (AV_CODEC_ID_AAC == m_pavFormatContext->streams[i]->codec->codec_id)
{
cout << "code id is AV_CODEC_ID_AAC!!!" <<endl;
}
}
}
bIsInit = true;
}
void Muxing()
{
if (false ==bIsInit)
{
InitParse();
}
while(1)
{
if (av_read_frame(m_pavFormatContext, pavPacket) < 0)
{
break;
}
else
{
if (lVidepId == pavPacket->stream_index)
{
cout<< "Write " << videocount <<" Video Frame" << endl;
FILE *pfile = fopen("F:\\test\\a.es", "ab+");
fwrite(pavPacket->data,1, pavPacket->size, pfile);
fclose(pfile);
videocount ++;
}
}
}
}
int InitOutputFile()
{
pOutputFile = fopen("F:\\test\\a.264","ab+");
}
AVFormatContext *m_pavformatOutput; //輸出content
AVFormatContext *m_pavFormatContext; //輸入content
unsigned char * m_buff;
int m_bufferSize;
stData *m_Data;
int lVidepId;
int lAudioId;
FILE *pOutputFile;
bool bIsInit;
AVPacket *pavPacket;
int videocount;
};
int main(int argc, char* argv[])
{
char szfilePath[100] = {"F:\\test\\ps.ps"};
FILE *pFile = fopen(szfilePath, "rb+");
AVParse *pavParse = new AVParse();
while (1)
{
if (fread(pavParse->m_Data->pbuff, 1, pavParse->m_Data->ulSize, pFile) == pavParse->m_Data->ulSize)
{
pavParse->m_Data->ulSizeOffset = 0;
pavParse->Muxing();
}
else
{
break;
}
}
cout << "Parse File End!!!" << endl;
system("pause");
return 0;
}