Real-time decoding of an H264 data stream with ffmpeg

Below is a test program that reads H264 data from a file and decodes it with ffmpeg, simulating the decoding of a real-time stream. The test has been run successfully and the decoded output is correct.

How to build ffmpeg, how to send and receive the data, and how to reassemble frames from network packets are outside the scope of this article; please consult the relevant documentation.
 

1. In the header (.h) file
// The ffmpeg headers are plain C, so wrap them in extern "C" when compiling as C++.
#ifdef __cplusplus
extern "C" {
#endif

#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"


#pragma comment(lib, "libgcc.a")
#pragma comment(lib, "libavcodec.a")
#pragma comment(lib, "libavformat.a")
#pragma comment(lib, "libavutil.a")
#pragma comment(lib, "libmingwex.a")


#ifdef __cplusplus
}
#endif
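
Note that #pragma comment(lib, ...) is an MSVC extension; the .a archives above come from a MinGW build of ffmpeg. On GCC/MinGW the same libraries are passed on the linker command line instead, for example (the source file name and library path here are placeholders):

g++ decode_test.cpp -L/path/to/ffmpeg/lib -lavformat -lavcodec -lavutil -lswscale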


2. In the .cpp file

// Write YUV420P data to a raw .yuv file.
// Passing pFrame == NULL closes the file (used once at the end of decoding).

FILE *g_fd_yuv420p = fopen("C:\\decodeTest\\420p_stream.yuv", "wb");
static void SaveFrame_YUV420P_2(AVFrame *pFrame, int width, int height, int iFrame){
    if(g_fd_yuv420p != NULL){
        if(pFrame != NULL){
            // Y plane: linesize may exceed width because of padding,
            // so copy row by row rather than in one block.
            for(int y = 0; y < height; y++){
                fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width, g_fd_yuv420p);
            }
            // U and V planes are half width and half height in YUV420P.
            for(int y = 0; y < (height / 2); y++){
                fwrite(pFrame->data[1] + y * pFrame->linesize[1], 1, width / 2, g_fd_yuv420p);
            }
            for(int y = 0; y < (height / 2); y++){
                fwrite(pFrame->data[2] + y * pFrame->linesize[2], 1, width / 2, g_fd_yuv420p);
            }
        }else{
            fclose(g_fd_yuv420p);
            g_fd_yuv420p = NULL;
        }
    }
}
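
The resulting raw file can be sanity-checked with ffplay, which needs the pixel format and frame size spelled out; for example, for the 352x288 stream used below:

ffplay -f rawvideo -pixel_format yuv420p -video_size 352x288 C:\decodeTest\420p_stream.yuv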

// Find the position of the next NAL unit (4-byte start code 00 00 00 01)
// in the H264 stream, searching from startPos.
// Returns 1 for SPS, 2 for PPS, 0 for any other NAL type, -1 if no start code is found.
int getNextNalPos(unsigned char *pData, int startPos, int dataLength, unsigned long *pPos){

    if(pData != NULL && startPos >= 0 && (dataLength - startPos) > 4){
        unsigned char *pTmp = pData + startPos;
        int restLen = dataLength - startPos;
        // Need at least the 4 start-code bytes plus 1 NAL header byte.
        while(restLen >= 5){

            // Compare byte by byte; casting to int* and comparing against
            // 0x01000000 would depend on endianness and unaligned access.
            if(pTmp[0] == 0x00 && pTmp[1] == 0x00 && pTmp[2] == 0x00 && pTmp[3] == 0x01){

                *pPos = (unsigned long)(pTmp - pData);
                // The NAL unit type is the low 5 bits of the byte after the start code
                // (checking the full byte against 0x67/0x68 would also depend on nal_ref_idc).
                int nalType = pTmp[4] & 0x1F;
                if(nalType == 7){
                    return 1;//SPS
                }else if(nalType == 8){
                    return 2;//PPS
                }else{
                    return 0;
                }
            }

            restLen--;
            pTmp++;
        }
    }
    return -1;
}
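
One caveat: H264 streams may also use the 3-byte start code 00 00 01, which the scanner above does not recognize. A minimal variant that matches both forms could look like the following (findStartCode is a hypothetical helper, not part of the test program):

// Hypothetical scanner matching both the 3-byte (00 00 01) and
// 4-byte (00 00 00 01) start codes. Returns the offset of the start code
// and stores its length in *pScLen, or returns -1 if none is found.
static int findStartCode(const unsigned char *p, int len, int *pScLen){
    for(int i = 0; i + 3 < len; i++){
        if(p[i] == 0x00 && p[i + 1] == 0x00){
            if(p[i + 2] == 0x01){
                *pScLen = 3;
                return i;
            }
            if(p[i + 2] == 0x00 && p[i + 3] == 0x01){
                *pScLen = 4;
                return i;
            }
        }
    }
    return -1;
}

The caller would then advance past the start code by *pScLen instead of the hard-coded offsets (4 and 5) used above.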

//Test function
void CtestCodecDlg::OnBnClickedButton2()
{
    // All declarations are up front so the gotos below do not jump over initializations.
    AVCodec *pCodec = NULL;
    AVCodecContext *pContext = NULL;
    AVFrame *pFrame = NULL;
    AVPacket packet = {0};
    unsigned char *pSrcData = NULL;
    FILE *fd = NULL;
    unsigned long cacheSize = (1024 << 10);// 1 MB read buffer
    int fdPos = 0;// absolute file position of the data consumed so far
    int frameNum = 1;


    av_register_all();

    pCodec = avcodec_find_decoder(CODEC_ID_H264);
    if(pCodec == NULL){
        LOG("H264 codec not found");
        goto stream_decode_finish;
    }

    pContext = avcodec_alloc_context3(pCodec);
    if(pContext == NULL){
        LOG("avcodec_alloc_context3 failed");
        goto stream_decode_finish;
    }
    pContext->time_base.num = 1;
    pContext->time_base.den = 25;
    pContext->bit_rate = 0;
    pContext->frame_number = 1;
    pContext->codec_type = AVMEDIA_TYPE_VIDEO;
    //see step 3 for how the image width and height are obtained
    pContext->width = 352;
    pContext->height = 288;
/*  pContext->pix_fmt = AV_PIX_FMT_YUV420P;

    pContext->profile = 578;
    pContext->level = 13;
    pContext->pkt_timebase.num = 1;
    pContext->pkt_timebase.den = 1200000;*/

    if(avcodec_open2(pContext, pCodec, NULL) < 0){
        LOG("avcodec_open2 failed");
        goto stream_decode_finish;
    }

    pFrame = avcodec_alloc_frame();
    if(pFrame == NULL){
        LOG("avcodec_alloc_frame failed");
        goto stream_decode_finish;
    }

    pSrcData = (unsigned char *)malloc(cacheSize);
    if(pSrcData == NULL){
        LOG("malloc failed");
        goto stream_decode_finish;
    }
    memset(pSrcData, 0, cacheSize);

    fd = fopen("C:\\out_2.h264", "rb");
//  fd = fopen("C:\\gaoqing.h264", "rb");
    if(fd == NULL){
        LOG("fopen failed");
        goto stream_decode_finish;
    }

/*  unsigned char extraData[64] = {0};
    fread(extraData, 1, 36, fd);
    pContext->extradata = extraData;
    pContext->extradata_size = 36;
    fseek(fd, 0, SEEK_SET);*/
    while(true){

        bool bBreak = false;
        int readLen = (int)fread(pSrcData, 1, cacheSize, fd);
        if(readLen < (int)cacheSize){
            // end of file: process what was read, then leave the loop
            bBreak = true;
        }

        int pos = 0;// start of the packet currently being assembled
        int processedLen = 0;// search position within the buffer
        unsigned long nextNalPos = 0;
        int ret = getNextNalPos(pSrcData, processedLen, readLen, &nextNalPos);
        int nalType = -1;
        while(ret != -1){

            //TRACE("nextPos: %0x\n", nextNalPos);

            if(ret == 1){// next NAL is an SPS: everything before it forms a complete packet

                int tmpLen = nextNalPos - pos;
                if(tmpLen > 0){// decode
                    packet.data = pSrcData + pos;
                    packet.size = tmpLen;
                    fdPos += tmpLen;
                    int frameFinished = 0;
                    int result = avcodec_decode_video2(pContext, pFrame, &frameFinished, &packet);
                    if(result < 0){
                        LOG("avcodec_decode_video2 failed");
                    }

                    if(frameFinished){
//                      TRACE("frameNum: %d, processedLen: %d\n", frameNum ++, processedLen);
                        SaveFrame_YUV420P_2(pFrame, pContext->width, pContext->height, 0);
                    }

                    pos = nextNalPos;
                }
                processedLen = nextNalPos + 5;// skip the start code plus the NAL header byte
                nalType = 1;
            }else if(ret == 2){// next NAL is a PPS

                if(nalType == 0){// only flush if the previous NAL was not an SPS
                    packet.data = pSrcData + pos;
                    packet.size = nextNalPos - pos;
                    fdPos += packet.size;
                    int frameFinished = 0;
                    int result = avcodec_decode_video2(pContext, pFrame, &frameFinished, &packet);
                    if(result < 0){
                        LOG("avcodec_decode_video2 failed");
                    }

                    if(frameFinished){
//                      TRACE("frameNum: %d, processedLen: %d\n", frameNum ++, processedLen);
                        SaveFrame_YUV420P_2(pFrame, pContext->width, pContext->height, 0);
                    }

                    pos = nextNalPos;
                }
                processedLen = nextNalPos + 5;
                nalType = 2;
            }else if(ret == 0){// any other NAL type (slice, SEI, ...)

                {
                    packet.data = pSrcData + pos;
                    packet.size = nextNalPos - pos;
                    fdPos += packet.size;
                    int frameFinished = 0;
                    int result = avcodec_decode_video2(pContext, pFrame, &frameFinished, &packet);
                    if(result < 0){
                        LOG("avcodec_decode_video2 failed");
                    }

                    if(frameFinished){
//                      TRACE("frameNum: %d, processedLen: %d\n", frameNum ++, processedLen);
                        SaveFrame_YUV420P_2(pFrame, pContext->width, pContext->height, 0);
                    }

                    pos = nextNalPos;
                }
                processedLen = nextNalPos + 5;
                nalType = 0;
            }
            ret = getNextNalPos(pSrcData, processedLen, readLen, &nextNalPos);
        }

        //TRACE("refilling the cache!! position: %d, processedLength: %d\n", fdPos, processedLen);
        // seek back to the first unconsumed byte so the next fread starts there
        fseek(fd, fdPos, SEEK_SET);

        if(bBreak){
            break;
        }
    }
    // pass NULL to close the output file (see SaveFrame_YUV420P_2)
    SaveFrame_YUV420P_2(NULL, pContext->width, pContext->height, 0);


stream_decode_finish:
    if(pFrame != NULL){
        avcodec_free_frame(&pFrame);
    }
    if(pContext != NULL){
        avcodec_close(pContext);
        av_free(pContext);
    }

    if(pSrcData != NULL){
        free(pSrcData);
        pSrcData = NULL;
    }
    if(fd != NULL){
        fclose(fd);
        fd = NULL;
    }
    return;
}
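
For reference: the listing above uses the old decoding API (av_register_all, CODEC_ID_H264, avcodec_alloc_frame, avcodec_decode_video2), all of which have since been deprecated. On FFmpeg 4.x and later, the same loop is normally written with av_parser_parse2 plus avcodec_send_packet / avcodec_receive_frame, which also replaces the hand-written start-code scanning. A rough sketch, with error handling trimmed and decodeWithNewApi as a hypothetical wrapper:

// Minimal sketch of the post-4.0 FFmpeg decoding API (error handling omitted).
void decodeWithNewApi(const char *inPath){
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    AVCodecParserContext *parser = av_parser_init(codec->id);
    AVPacket *pkt = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();
    avcodec_open2(ctx, codec, NULL);

    FILE *in = fopen(inPath, "rb");
    unsigned char buf[4096];
    int len;
    while((len = (int)fread(buf, 1, sizeof(buf), in)) > 0){
        unsigned char *p = buf;
        while(len > 0){
            // the parser splits the raw byte stream into complete packets,
            // replacing the manual getNextNalPos() scanning above
            int used = av_parser_parse2(parser, ctx, &pkt->data, &pkt->size,
                                        p, len, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            p += used;
            len -= used;
            if(pkt->size > 0 && avcodec_send_packet(ctx, pkt) == 0){
                while(avcodec_receive_frame(ctx, frame) == 0){
                    // frame now holds decoded YUV; write it out as before
                    SaveFrame_YUV420P_2(frame, ctx->width, ctx->height, 0);
                }
            }
        }
    }
    // (a real implementation would also flush the decoder with a NULL packet at EOF)
    fclose(in);
    av_parser_close(parser);
    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
}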

3. Notes
The image width and height can be read from the SPS in the H264 bitstream. The SPS contains two fields, pic_width_in_mbs_minus1 and pic_height_in_map_units_minus1, which encode the width and height in macroblock (16x16) units, minus 1. The actual width is therefore ((pic_width_in_mbs_minus1 + 1) << 4) and the actual height is ((pic_height_in_map_units_minus1 + 1) << 4). (Strictly speaking, the height formula holds only when frame_mbs_only_flag == 1 and there is no frame cropping; otherwise the crop offsets must be applied as well.)
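For the 352x288 stream used above, for example, the SPS carries pic_width_in_mbs_minus1 = 21 and pic_height_in_map_units_minus1 = 17, giving (21 + 1) << 4 = 352 and (17 + 1) << 4 = 288.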