RTSP Client Receiving Based on live555, with FFmpeg Decoding

Original post: http://blog.chinaunix.net/uid-15063109-id-4482932.html?page=4

Many people use live555 precisely to build an RTSP client.
live555 ships a full-featured RTSP client, openRTSP, and many beginners learn live555 and RTSP through it. Modifying that program to play a single stream is easy, but a typical client needs to play or record several streams at the same time, and openRTSP gets unwieldy for that. The program itself even notes:
// NOTE: If you want to develop your own RTSP client application (or embed RTSP client functionality into your own application),
// then we don't recommend using this code as a model, because it is too complex (with many options).
// Instead, we recommend using the "testRTSPClient" application code as a model.

It recommends testRTSPClient instead: the code is concise, yet quite sufficient.

After testRTSPClient.cpp receives stream data locally, it simply logs it without any further processing, which makes it just right for adaptation, and it already supports multiple streams. Below, using this cpp as an example, we wrap it into a reusable demo class.

First, a quick walk through the flow:
1. openURL starts playback.
2. Inside openURL, sendDescribeCommand sends a DESCRIBE request to the server; the response is then handled through callbacks (see the handler signatures sketched below).
3. If there is no error, env->taskScheduler().doEventLoop(&eventLoopWatchVariable); blocks here and runs the event loop.
4. DummySink is the data sink; DummySink::afterGettingFrame is where each frame of data arrives.
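
For orientation, these are the RTSP response handlers that testRTSPClient.cpp chains together, one per command (signatures as declared in that file):

// Handlers for the responses to each RTSP command, from testRTSPClient.cpp:
void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString);
void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString);
void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString);
// DESCRIBE -> SETUP (once per subsession) -> PLAY; once PLAY succeeds,
// each subsession's DummySink begins receiving frames in afterGettingFrame().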

In this program, main calls:

for (int i = 1; i <= argc-1; ++i) {
    openURL(*env, argv[0], argv[i]);
}

void shutdownStream(RTSPClient* rtspClient, int exitCode = 1);
This stops one particular stream; the rtspClient was created by openURL. Together, these two give simultaneous playback of multiple streams.
For simple needs, all we really have to do is wrap openURL and shutdownStream up together.

Here is a sketch of the simple interface:
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"
#include <pthread.h>
#include <string>
using std::string;

class CRTSPSession
{
public:
    CRTSPSession();
    virtual ~CRTSPSession();
    int startRTSPClient(char const* progName, char const* rtspURL, int debugLevel);
    int stopRTSPClient();
    int openURL(UsageEnvironment& env, char const* progName, char const* rtspURL, int debugLevel);

    RTSPClient* m_rtspClient;
    char eventLoopWatchVariable; // set to 1 to make doEventLoop() return
    pthread_t tid;
    bool m_running;
    string m_rtspUrl;
    string m_progName;
    int m_debugLevel;
    int m_nStatus; // 0: idle, 1: event loop running, 2: session thread finished
    int m_nID;     // caller-assigned stream ID, copied into ourRTSPClient

    static void* rtsp_thread_fun(void* param);
    void rtsp_fun();
};

CRTSPSession::CRTSPSession()
{
    m_rtspClient = NULL;
    m_running = false;
    eventLoopWatchVariable = 0;
    m_nStatus = 0;
}

CRTSPSession::~CRTSPSession()
{
}
int CRTSPSession::startRTSPClient(char const* progName, char const* rtspURL, int debugLevel)
{
    m_progName = progName;
    m_rtspUrl = rtspURL;
    m_debugLevel = debugLevel;
    eventLoopWatchVariable = 0;
    int r = pthread_create(&tid, NULL, rtsp_thread_fun, this);
    if (r)
    {
        perror("pthread_create()");
        return -1;
    }
    return 0;
}
int CRTSPSession::stopRTSPClient()
{
    eventLoopWatchVariable = 1;
    return 0;
}
void* CRTSPSession::rtsp_thread_fun(void* param)
{
    CRTSPSession* pThis = (CRTSPSession*)param;
    pThis->rtsp_fun();
    return NULL;
}
void CRTSPSession::rtsp_fun()
{
    //::startRTSP(m_progName.c_str(), m_rtspUrl.c_str(), m_debugLevel);
    TaskScheduler* scheduler = BasicTaskScheduler::createNew();
    UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
    if (openURL(*env, m_progName.c_str(), m_rtspUrl.c_str(), m_debugLevel) == 0)
    {
        m_nStatus = 1;
        env->taskScheduler().doEventLoop(&eventLoopWatchVariable); // blocks until the watch variable is set

        m_running = false;
        eventLoopWatchVariable = 0;

        if (m_rtspClient)
        {
            shutdownStream(m_rtspClient, 0);
        }
        m_rtspClient = NULL;
    }

    env->reclaim();
    env = NULL;
    delete scheduler;
    scheduler = NULL;
    m_nStatus = 2;
}
int CRTSPSession::openURL(UsageEnvironment& env, char const* progName, char const* rtspURL, int debugLevel)
{
    m_rtspClient = ourRTSPClient::createNew(env, rtspURL, debugLevel, progName);
    if (m_rtspClient == NULL)
    {
        env << "Failed to create a RTSP client for URL \"" << rtspURL << "\": " << env.getResultMsg() << "\n";
        return -1;
    }
    ((ourRTSPClient*)m_rtspClient)->m_nID = m_nID;
    m_rtspClient->sendDescribeCommand(continueAfterDESCRIBE);
    return 0;
}
// A function that outputs a string that identifies each stream (for debugging output). Modify this if you wish:
UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient) {
    return env << "[URL:\"" << rtspClient.url() << "\"]: ";
}

// A function that outputs a string that identifies each subsession (for debugging output). Modify this if you wish:
UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) {
    return env << subsession.mediumName() << "/" << subsession.codecName();
}

void usage(UsageEnvironment& env, char const* progName) {
    env << "Usage: " << progName << " <rtsp-url-1> ... <rtsp-url-N>\n";
    env << "\t(where each <rtsp-url-i> is a \"rtsp://\" URL)\n";
}
This simple class is just a light modification of testRTSPClient.cpp; every other function stays unchanged. It merely gathers open and shutdown into one class and runs the session on its own thread, because
env->taskScheduler().doEventLoop(&eventLoopWatchVariable); blocks: live555's doEventLoop only exits its loop when eventLoopWatchVariable becomes 1.

In testRTSPClient.cpp, setting eventLoopWatchVariable to 1 stops all streams at once. A real client, though, may want to stop any single stream while the others keep playing, so we create one thread per stream; then setting that session's watch variable stops just that stream.
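
For instance (a hypothetical two-stream snippet using the class above; the URLs and program name are placeholders):

CRTSPSession* s1 = new CRTSPSession;
CRTSPSession* s2 = new CRTSPSession;
s1->startRTSPClient("demo", "rtsp://192.168.1.10/stream1", 1);
s2->startRTSPClient("demo", "rtsp://192.168.1.10/stream2", 1);

// ... later, stop only the second stream; the first keeps playing:
s2->stopRTSPClient();
delete s2;
s2 = NULL;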

Finally, DummySink::afterGettingFrame is where the media data arrives. Once you have it there, you can pass it out through whatever mechanism you design: a callback function, an abstract base class, or even SendMessage straight to some window.
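
A minimal sketch of the abstract-base-class approach (CFrameCB and the m_pFrameCB member are hypothetical additions to DummySink; the afterGettingFrame signature and fReceiveBuffer come from testRTSPClient.cpp):

class CFrameCB
{
public:
    virtual void frameCB(unsigned char* data, unsigned size, struct timeval presentationTime) = 0;
};

// In DummySink::afterGettingFrame(), instead of just logging the frame:
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime,
                                  unsigned /*durationInMicroseconds*/)
{
    if (m_pFrameCB != NULL)
    {
        // fReceiveBuffer is DummySink's receive buffer (see testRTSPClient.cpp)
        m_pFrameCB->frameCB(fReceiveBuffer, frameSize, presentationTime);
    }
    continuePlaying(); // ask for the next frame, as the original code does
}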

Incidentally, live555's doEventLoop is designed quite flexibly and could well be made non-blocking. But the goal of this article is to help live555 beginners who haven't fully mastered it yet put together a simple tool for receiving and handling RTSP; working through this example also makes it easier to understand how RTSP works.

A quick word on calling the class above:

CRTSPSession* pRtsp = new CRTSPSession;
if (pRtsp->startRTSPClient(progName, rtspURL, debugLevel))
{
    delete pRtsp;
    pRtsp = NULL;
    return -1;
}

And to stop:

pRtsp->stopRTSPClient();
delete pRtsp;
pRtsp = NULL;

While we are at it, let's also give the decoding of the received video a simple wrapper:

#include <stdio.h>
#include <stdint.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}

class CDecodeCB
{
public:
    virtual void videoCB(int width, int height, uint8_t* buff, int len) = 0;
};

class CFfmpegDecode
{
public:
    CFfmpegDecode();
    ~CFfmpegDecode();
    int initFFMPEG();
    int openDecoder(int width, int height, CDecodeCB* pCB);
    int closeDecoder();
    int decode_rtsp_frame(uint8_t* input, int nLen, bool bWaitIFrame /*= false*/);

private:
    bool m_bInit;
    AVCodec* decode_codec;
    AVCodecContext* decode_c;
    AVFrame* decode_picture;
    struct SwsContext* img_convert_ctx;
    CDecodeCB* m_pCB;
    int m_nWidth;
    int m_nHeight;
};

static int sws_flags = SWS_BICUBIC;
CFfmpegDecode::CFfmpegDecode()
{
    m_bInit = false;
    img_convert_ctx = NULL;
    decode_codec = NULL;
    decode_c = NULL;
    decode_picture = NULL;
    m_pCB = NULL;
}

CFfmpegDecode::~CFfmpegDecode()
{
    av_lockmgr_register(NULL); // unregister the lock manager, if one was set
}

int CFfmpegDecode::initFFMPEG()
{
    //m_state = RC_STATE_INIT;
    avcodec_register_all();
    av_register_all();
    //avformat_network_init();

    //if (av_lockmgr_register(lockmgr))
    //{
    //    m_state = RC_STATE_INIT_ERROR;
    //    return -1;
    //}
    return 0;
}
int CFfmpegDecode::openDecoder(int width, int height, CDecodeCB* pCB)
{
    m_nWidth = width;
    m_nHeight = height;
    m_pCB = pCB;
    if (m_bInit)
        return -1;

    decode_codec = avcodec_find_decoder(CODEC_ID_H264);
    if (!decode_codec)
    {
        fprintf(stderr, "codec not found\n");
        return -2;
    }

    decode_c = avcodec_alloc_context3(decode_codec);
    decode_c->codec_id = CODEC_ID_H264;
    decode_c->codec_type = AVMEDIA_TYPE_VIDEO;
    decode_c->pix_fmt = PIX_FMT_YUV420P;

    decode_picture = avcodec_alloc_frame();

    if (avcodec_open2(decode_c, decode_codec, NULL) < 0)
    {
        //fprintf(stderr, "could not open codec\n");
        return -3;
    }
    m_bInit = true;
    return 0;
}

int CFfmpegDecode::closeDecoder()
{
    if (decode_c)
    {
        avcodec_close(decode_c);
        av_free(decode_c);
        decode_c = NULL;
    }
    if (decode_picture)
    {
        av_free(decode_picture);
        decode_picture = NULL;
    }
    m_bInit = false;
    return 0;
}

int CFfmpegDecode::decode_rtsp_frame(uint8_t* input, int nLen, bool bWaitIFrame /*= false*/)
{
    if (!m_bInit)
        return -1;
    if (input == NULL || nLen <= 0)
        return -2;

    try
    {
        int got_picture;

        AVPacket avpkt;
        av_init_packet(&avpkt);
        avpkt.size = nLen;
        avpkt.data = input;

        // One call decodes (at most) one frame; no need to loop over the packet here.
        int len = avcodec_decode_video2(decode_c, decode_picture, &got_picture, &avpkt);
        if (len < 0)
        {
            return -3;
        }

        if (got_picture)
        {
            int w = decode_c->width;
            int h = decode_c->height;
            int numBytes = avpicture_get_size(PIX_FMT_RGB24, w, h);
            uint8_t* buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));

            AVFrame* pFrameRGB = avcodec_alloc_frame();
            avpicture_fill((AVPicture*)pFrameRGB, buffer, PIX_FMT_RGB24, w, h);

            img_convert_ctx = sws_getCachedContext(img_convert_ctx,
                w, h, (PixelFormat)decode_picture->format, w, h, PIX_FMT_RGB24,
                sws_flags, NULL, NULL, NULL);
            if (img_convert_ctx == NULL)
            {
                fprintf(stderr, "Cannot initialize the conversion context\n");
                av_free(buffer);   // free on the error path too, to avoid leaks
                av_free(pFrameRGB);
                return -4;
            }
            sws_scale(img_convert_ctx, decode_picture->data, decode_picture->linesize,
                0, h, pFrameRGB->data, pFrameRGB->linesize);

            if (m_pCB)
            {
                m_pCB->videoCB(w, h, pFrameRGB->data[0], numBytes * sizeof(uint8_t));
            }

            av_free(buffer);
            av_free(pFrameRGB);
            return 0;
        }
        else
        {
            return -5;
        }
    }
    catch (...)
    {
    }
    return -6;
}

The code draws on ffplay.c and decode_encode.c.
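
To tie the two classes together, here is a minimal usage sketch (CMyRender and the wiring from DummySink into the decoder are hypothetical; in practice you would forward each frame from DummySink::afterGettingFrame into decode_rtsp_frame):

class CMyRender : public CDecodeCB
{
public:
    // called with an RGB24 image each time a frame decodes successfully
    virtual void videoCB(int width, int height, uint8_t* buff, int len)
    {
        // hand the RGB buffer to your display or recording code here
    }
};

CMyRender render;
CFfmpegDecode decoder;
decoder.initFFMPEG();
decoder.openDecoder(704, 576, &render); // width/height here are placeholders

// in the frame callback fed by DummySink::afterGettingFrame:
//     decoder.decode_rtsp_frame(frameData, frameSize, false);

decoder.closeDecoder();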

If you run into trouble under multiple threads, remember av_lockmgr_register.
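
A minimal sketch of a pthread-based lock manager (the name lockmgr matches the commented-out call in initFFMPEG above; register it once, before any decoder is opened):

#include <pthread.h>

// Lock-manager callback in the form av_lockmgr_register() expects.
static int lockmgr(void** mtx, enum AVLockOp op)
{
    switch (op)
    {
    case AV_LOCK_CREATE:
        *mtx = new pthread_mutex_t;
        pthread_mutex_init((pthread_mutex_t*)*mtx, NULL);
        return 0;
    case AV_LOCK_OBTAIN:
        return pthread_mutex_lock((pthread_mutex_t*)*mtx) != 0;
    case AV_LOCK_RELEASE:
        return pthread_mutex_unlock((pthread_mutex_t*)*mtx) != 0;
    case AV_LOCK_DESTROY:
        pthread_mutex_destroy((pthread_mutex_t*)*mtx);
        delete (pthread_mutex_t*)*mtx;
        return 0;
    }
    return 1;
}

// e.g. inside initFFMPEG():
//     if (av_lockmgr_register(lockmgr)) return -1;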
