Multi-stream RTSP video decoding based on live555

  This wraps the open-source live555 library and decodes on a thread pool. Decoding is split into a hardware (NVIDIA) path and a software (FFmpeg) path, and the decoder implementations are loaded dynamically as plugins.
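Each decoder plugin is an ordinary shared library that exports one C factory symbol; DecoderFactory resolves it at runtime with dlopen()/dlsym() (see loadPlugin in DecoderFactory.h below). The contract every plugin fulfills:

extern "C" Decoder* createDecoder(TaskManager* t); // implemented by libFfmpegDecoderPlugin.so and libNvidiaDecoderPlugin.so

// MultiRTSPClient.h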

#pragma once

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"
#include "Decoder.h"
#define REQUEST_STREAMING_OVER_TCP True

class StreamClientState {
public:
  StreamClientState():iter(nullptr),session(nullptr),subsession(nullptr),
  streamTimerTask(nullptr),duration(0.0){}

  virtual ~StreamClientState(){
      delete iter;
      if(session != nullptr){
          UsageEnvironment& env = session->envir();

          env.taskScheduler().unscheduleDelayedTask(streamTimerTask);
          Medium::close(session);
      }
  }

public:
  MediaSubsessionIterator* iter;
  MediaSession* session;
  MediaSubsession* subsession;
  TaskToken streamTimerTask;
  double duration;
};

class MultiRTSPClient : public RTSPClient
{
public:
    MultiRTSPClient(UsageEnvironment& env, char const* rtspURL, int verbosityLevel = 0,
    char const* applicationName = nullptr, portNumBits tunnelOverHTTPPortNum = 0);
    virtual ~MultiRTSPClient();

    Decoder *decoder_ = nullptr;
    StreamClientState scs;
};

// RTSP 'response handlers':
void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString);
void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString);
void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString);

// Other event handler functions:
void subsessionAfterPlaying(void* clientData); // called when a stream's subsession (e.g., audio or video substream) ends
void subsessionByeHandler(void* clientData, char const* reason);
  // called when a RTCP "BYE" is received for a subsession
void streamTimerHandler(void* clientData);
  // called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE")

// Used to iterate through each stream's 'subsessions', setting up each one:
void setupNextSubsession(RTSPClient* rtspClient);

// Used to shut down and close a stream (including its "RTSPClient" object):
void shutdownStream(RTSPClient* rtspClient, int exitCode = 1);
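
// MultiRTSPClient.cpp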
#include "MultiRTSPClient.h"
#include "DummySink.h"

MultiRTSPClient::MultiRTSPClient(UsageEnvironment &env, const char *rtspURL, int verbosityLevel, const char *applicationName, portNumBits tunnelOverHTTPPortNum):
    RTSPClient(env,rtspURL,verbosityLevel,applicationName,tunnelOverHTTPPortNum, -1)
{
}

MultiRTSPClient::~MultiRTSPClient()
{
}

// A function that outputs a string that identifies each stream (for debugging output).  Modify this if you wish:
UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient) {
    return env << "[URL:\"" << rtspClient.url() << "\"]: ";
}

// A function that outputs a string that identifies each subsession (for debugging output).  Modify this if you wish:
UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) {
    return env << subsession.mediumName() << "/" << subsession.codecName();
}

void continueAfterDESCRIBE(RTSPClient *rtspClient, int resultCode, char *resultString)
{
    do {
        UsageEnvironment& env = rtspClient->envir(); // alias
        StreamClientState& scs = ((MultiRTSPClient*)rtspClient)->scs; // alias

        if (resultCode != 0) {
            env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n";
            delete[] resultString;
            break;
        }

        char* const sdpDescription = resultString;
        env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n";

        // Create a media session object from this SDP description:
        scs.session = MediaSession::createNew(env, sdpDescription);
        delete[] sdpDescription; // because we don't need it anymore
        if (scs.session == NULL) {
            env << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg() << "\n";
            break;
        } else if (!scs.session->hasSubsessions()) {
            env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
            break;
        }

        // Then, create and set up our data source objects for the session.  We do this by iterating over the session's 'subsessions',
        // calling "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, on each one.
        // (Each 'subsession' will have its own data source.)
        scs.iter = new MediaSubsessionIterator(*scs.session);
        setupNextSubsession(rtspClient);
        return;
    } while (0);

    // An unrecoverable error occurred with this stream.
    shutdownStream(rtspClient);
}
#include <iostream>
void continueAfterSETUP(RTSPClient *rtspClient, int resultCode, char *resultString)
{
    do {
        UsageEnvironment& env = rtspClient->envir(); // alias
        StreamClientState& scs = ((MultiRTSPClient*)rtspClient)->scs; // alias

        if (resultCode != 0) {
            env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString << "\n";
            break;
        }

        env << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (";
        if (scs.subsession->rtcpIsMuxed()) {
            env << "client port " << scs.subsession->clientPortNum();
        } else {
            env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
        }
        env << ")\n";

        // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
        // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
        // after we've sent a RTSP "PLAY" command.)

//        std::ostream::fmtflags oldFlag = std::cout.flags();
//        std::cout << std::hex << (int)scs.subsession->fmtp_spropsps()[0] << (int)scs.subsession->fmtp_spropsps()[1] << (int)scs.subsession->fmtp_spropsps()[2] << std::endl;
        scs.subsession->sink = new DummySink(env, *scs.subsession, rtspClient->url(), ((MultiRTSPClient*)rtspClient)->decoder_);
        // perhaps use your own custom "MediaSink" subclass instead
        if (scs.subsession->sink == NULL) {
            env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
                << "\" subsession: " << env.getResultMsg() << "\n";
            break;
        }

        env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n";
        scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession
        scs.subsession->sink->startPlaying(*(scs.subsession->readSource()),
                                           subsessionAfterPlaying, scs.subsession);
        // Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
        if (scs.subsession->rtcpInstance() != NULL) {
            scs.subsession->rtcpInstance()->setByeWithReasonHandler(subsessionByeHandler, scs.subsession);
        }
    } while (0);
    delete[] resultString;

    // Set up the next subsession, if any:
    setupNextSubsession(rtspClient);
}

void continueAfterPLAY(RTSPClient *rtspClient, int resultCode, char *resultString)
{
    Boolean success = False;

    do {
        UsageEnvironment& env = rtspClient->envir(); // alias
        StreamClientState& scs = ((MultiRTSPClient*)rtspClient)->scs; // alias

        if (resultCode != 0) {
            env << *rtspClient << "Failed to start playing session: " << resultString << "\n";
            break;
        }

        // Set a timer to be handled at the end of the stream's expected duration (if the stream does not already signal its end
        // using a RTCP "BYE").  This is optional.  If, instead, you want to keep the stream active - e.g., so you can later
        // 'seek' back within it and do another RTSP "PLAY" - then you can omit this code.
        // (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.)
        if (scs.duration > 0) {
            unsigned const delaySlop = 2; // number of seconds extra to delay, after the stream's expected duration.  (This is optional.)
            scs.duration += delaySlop;
            unsigned uSecsToDelay = (unsigned)(scs.duration*1000000);
            scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient);
        }

        env << *rtspClient << "Started playing session";
        if (scs.duration > 0) {
            env << " (for up to " << scs.duration << " seconds)";
        }
        env << "...\n";

        success = True;
    } while (0);
    delete[] resultString;

    if (!success) {
        // An unrecoverable error occurred with this stream.
        shutdownStream(rtspClient);
    }
}

void subsessionAfterPlaying(void *clientData)
{
    MediaSubsession* subsession = (MediaSubsession*)clientData;
    RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr);

    // Begin by closing this subsession's stream:
    Medium::close(subsession->sink);
    subsession->sink = NULL;

    // Next, check whether *all* subsessions' streams have now been closed:
    MediaSession& session = subsession->parentSession();
    MediaSubsessionIterator iter(session);
    while ((subsession = iter.next()) != NULL) {
        if (subsession->sink != NULL) return; // this subsession is still active
    }

    // All subsessions' streams have now been closed, so shutdown the client:
    shutdownStream(rtspClient);
}

void subsessionByeHandler(void *clientData, const char *reason)
{
    MediaSubsession* subsession = (MediaSubsession*)clientData;
    RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr;
    UsageEnvironment& env = rtspClient->envir(); // alias

    env << *rtspClient << "Received RTCP \"BYE\"";
    if (reason != NULL) {
        env << " (reason:\"" << reason << "\")";
        delete[] (char*)reason; // reason is heap-allocated by live555; cast away const to free it
    }
    env << " on \"" << *subsession << "\" subsession\n";

    // Now act as if the subsession had closed:
    subsessionAfterPlaying(subsession);
}

void streamTimerHandler(void *clientData)
{
    MultiRTSPClient* rtspClient = (MultiRTSPClient*)clientData;
    StreamClientState& scs = rtspClient->scs; // alias

    scs.streamTimerTask = NULL;

    // Shut down the stream:
    shutdownStream(rtspClient);
}

void setupNextSubsession(RTSPClient *rtspClient)
{
    UsageEnvironment& env = rtspClient->envir(); // alias
    StreamClientState& scs = ((MultiRTSPClient*)rtspClient)->scs; // alias

    scs.subsession = scs.iter->next();
    if (scs.subsession != NULL) {
        if (!scs.subsession->initiate()) {
            env << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n";
            setupNextSubsession(rtspClient); // give up on this subsession; go to the next one
        } else {
            env << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (";
            if (scs.subsession->rtcpIsMuxed()) {
                env << "client port " << scs.subsession->clientPortNum();
            } else {
                env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
            }
            env << ")\n";

            // Continue setting up this subsession, by sending a RTSP "SETUP" command:
            rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, REQUEST_STREAMING_OVER_TCP);
        }
        return;
    }

    // We've finished setting up all of the subsessions.  Now, send a RTSP "PLAY" command to start the streaming:
    if (scs.session->absStartTime() != NULL) {
        // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
        rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(), scs.session->absEndTime());
    } else {
        scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
        rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
    }
}

void shutdownStream(RTSPClient *rtspClient, int exitCode)
{
    UsageEnvironment& env = rtspClient->envir(); // alias
    StreamClientState& scs = ((MultiRTSPClient*)rtspClient)->scs; // alias

    // First, check whether any subsessions have still to be closed:
    if (scs.session != NULL) {
        Boolean someSubsessionsWereActive = False;
        MediaSubsessionIterator iter(*scs.session);
        MediaSubsession* subsession;

        while ((subsession = iter.next()) != NULL) {
            if (subsession->sink != NULL) {
                Medium::close(subsession->sink);
                subsession->sink = NULL;

                if (subsession->rtcpInstance() != NULL) {
                    subsession->rtcpInstance()->setByeHandler(NULL, NULL); // in case the server sends a RTCP "BYE" while handling "TEARDOWN"
                }

                someSubsessionsWereActive = True;
            }
        }

        if (someSubsessionsWereActive) {
            // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream.
            // Don't bother handling the response to the "TEARDOWN".
            rtspClient->sendTeardownCommand(*scs.session, NULL);
        }
    }

    env << *rtspClient << "Closing the stream.\n";
    Medium::close(rtspClient);
    // Note that this will also cause this stream's "StreamClientState" structure to get reclaimed.

//    if (--rtspClientCount == 0) {
        // The final stream has ended, so exit the application now.
        // (Of course, if you're embedding this code into your own application, you might want to comment this out,
        // and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".)
//        exit(exitCode);
//    }
}
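
// Decoder.h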
#pragma once

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}
#include <memory>
#include <functional>

class Decoder
{
public:
    explicit Decoder() = default;
    virtual ~Decoder(){}
    /***
     * @codec   video codec ID
     * @error   error message on failure
    ***/
    virtual bool Initsize(const AVCodecID codec,std::string &error) = 0;
    /***
     * @format  pixel format of the decoded picture
     * @data    frame data
     * @width   picture width
     * @height  picture height
     ***/
    virtual void SetFrameCallBack(const std::function<void(const AVPixelFormat format, const u_char *data, const int width, const int height)> frameHandler) = 0;
    /***
     * @pkt  packet to decode; the first packet must carry SPS/PPS.
     *       The callback registered via SetFrameCallBack() fires on each decoded frame.
    ***/
    virtual void Decode(const AVPacket *pkt) = 0;
};
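
// DecoderFactory.h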
#pragma once
#include <dlfcn.h>
#include <iostream>
#include <thread>
#include "Decoder.h"
#include "TaskManager.h"

inline Decoder *loadPlugin(const char *path, TaskManager *t) // inline: this lives in a header included in several places
{
    void *handle = dlopen(path, RTLD_LAZY);
    if(!handle){
        std::cout << dlerror() << std::endl;
        return nullptr;
    }

    dlerror(); // clear any stale error state before dlsym()
    typedef Decoder*(*CreatePlugin)(TaskManager*);
    CreatePlugin createFun = (CreatePlugin)dlsym(handle, "createDecoder"); // dlsym() returns void*, so the cast is required in C++

    char *error = nullptr;
    if((error = dlerror()) != nullptr){
        std::cout << error << std::endl;
        return nullptr;
    }
    return createFun(t); // note: handle is never dlclose()d, so the plugin stays mapped for the process lifetime
}

class DecoderFactory
{
public:
    enum DecoderType{
        Ffmpeg,
        Nvidia
    };
    explicit DecoderFactory() = default;
    virtual ~DecoderFactory(){}
    void Initsize(int thr_num){
        taskManager_ = new TaskManager(thr_num);
    }
    Decoder* MakeDecoder(DecoderType t = Ffmpeg){
        Decoder *d = nullptr;
        switch (t) {
        case Ffmpeg:
            d = loadPlugin("libFfmpegDecoderPlugin.so",taskManager_);
            break;
        case Nvidia:
            d = loadPlugin("libNvidiaDecoderPlugin.so",taskManager_);
            break;
        }
        return d;
    }

private:
    TaskManager *taskManager_ = nullptr; // set by Initsize()
};
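For reference, a minimal sketch of driving the factory (RTSPClientManager below does the same; the thread count here is arbitrary):

DecoderFactory factory;
factory.Initsize(4);                                       // one shared TaskManager with 4 decode threads
Decoder *d = factory.MakeDecoder(DecoderFactory::Nvidia);  // nullptr if the plugin .so can't be loaded

// DummySink.h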
#pragma once
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}
#include "MediaSink.hh"
#include "MediaSession.hh"
#include "Boolean.hh"
#include "UsageEnvironment.hh"
#include "Decoder.h"
#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 100000
#define DEBUG_PRINT_EACH_RECEIVED_FRAME

class DummySink: public MediaSink
{
public:
    DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId, Decoder *d);
    virtual ~DummySink();

private:
    static void afterGettingFrame(void* clientData, unsigned frameSize,
                                  unsigned numTruncatedBytes,
                                  struct timeval presentationTime,
                                  unsigned durationInMicroseconds);
    void afterGettingFrame(unsigned frameSize,unsigned numTruncatedBytes,
                            struct timeval presentationTime,
                            unsigned durationInMicroseconds);
    
    Boolean continuePlaying();

    u_int8_t* fReceiveBuffer = nullptr;
    bool isFirstFrame_ = true;
    MediaSubsession& fSubsession;
    char* fStreamId;

    Decoder *decoder_;
    struct timeval pre_time_stamp = {0,0};
    char *p_nalu_tail = nullptr;
    char *nalu_buffer = nullptr;
};
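
// DummySink.cpp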
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
}
#include "H264VideoRTPSource.hh"
#include "DummySink.h"
#include "Queue.h"

DummySink::DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId, Decoder *d):
    MediaSink(env),
    fSubsession(subsession),
    decoder_(d)
{
    fStreamId = ::strDup(streamId);
    if(fStreamId[::strlen(fStreamId) - 1] == '/'){
        fStreamId[::strlen(fStreamId) - 1] = '\0';
    }
    fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];

    p_nalu_tail = new char[1024*1024];
    nalu_buffer = p_nalu_tail;
}

DummySink::~DummySink()
{
    delete[] fReceiveBuffer;
    delete[] fStreamId;
    delete[] nalu_buffer;
}

void DummySink::afterGettingFrame(void* clientData, unsigned frameSize,
                                  unsigned numTruncatedBytes,
                                  struct timeval presentationTime,
                                  unsigned durationInMicroseconds)
{
    DummySink* sink = (DummySink*)clientData;
    sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);

}
#include <boost/archive/iterators/base64_from_binary.hpp>
#include <boost/archive/iterators/binary_from_base64.hpp>
#include <boost/archive/iterators/transform_width.hpp>
#include <sstream>
bool Base64Decode(const std::string &input, std::string &output)
{
    typedef boost::archive::iterators::transform_width<boost::archive::iterators::binary_from_base64<std::string::const_iterator>, 8, 6> Base64DecodeIterator;
    std::stringstream result;
    try {
        std::copy( Base64DecodeIterator( input.begin() ), Base64DecodeIterator( input.end() ), std::ostream_iterator<char>( result ) );
    } catch ( ... ) {
        return false;
    }
    output = result.str();
    return output.empty() == false;
}

#include <iostream>
void DummySink::afterGettingFrame(unsigned frameSize,unsigned numTruncatedBytes,
                                  struct timeval presentationTime,
                                  unsigned durationInMicroseconds)
{
    // We've just received a frame of data.  (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
    if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
    envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
    if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
    char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
    sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
    envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr << "\tprotocol:" << fSubsession.protocolName();
    if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
        envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
    }
#ifdef DEBUG_PRINT_NPT
    envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
#endif
    envir() << "\n";
#endif

    u_char const start_code[4]{0x00,0x00,0x00,0x01};
    if(!::strcmp(fSubsession.codecName(),"H264"))
    {
        if (isFirstFrame_)            // runs only once, for the first frame of each playback
        {    // prepend the SPS/PPS from the SDP so the decoder can initialize
            std::string msg;
            decoder_->Initsize(AV_CODEC_ID_H264, msg);
            unsigned numSPropRecords;
            SPropRecord* sPropRecords = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), numSPropRecords);
//            SPropRecord &sps = sPropRecords[0];
//            SPropRecord &pps = sPropRecords[1];
//            std::cout << std::hex;
//            for(int i = 0; i < sps.sPropLength; i++){
//                std::cout << (int)sps.sPropBytes[i] << " ";
//            }
//            std::cout << std::endl;

            // spydroid v6.8 or spydroid v9.1.
            for (unsigned i = 0; i < numSPropRecords; ++i)
            {
                memcpy(p_nalu_tail, start_code, sizeof(start_code));
                p_nalu_tail += sizeof(start_code);
                memcpy(p_nalu_tail, sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength);
                p_nalu_tail += sPropRecords[i].sPropLength;
            }
            delete[] sPropRecords; // each SPropRecord's destructor frees its sPropBytes
            isFirstFrame_ = false; // SPS/PPS have been prepended

            memcpy(p_nalu_tail, start_code, sizeof(start_code));
            p_nalu_tail += sizeof(start_code);
            memcpy(p_nalu_tail, fReceiveBuffer, frameSize);
            p_nalu_tail += frameSize;
        }
        else
        {
            if(presentationTime.tv_sec == pre_time_stamp.tv_sec && presentationTime.tv_usec == pre_time_stamp.tv_usec) // same timestamp => same frame; keep accumulating NAL units
            {
                memcpy(p_nalu_tail, start_code, sizeof(start_code));
                p_nalu_tail += sizeof(start_code);
                memcpy(p_nalu_tail, fReceiveBuffer, frameSize);
                p_nalu_tail += frameSize;
            }
            else
            {
                // the timestamp changed: a new frame began, so hand the accumulated access unit to the decoder
                if(p_nalu_tail != nalu_buffer)
                {
                    AVPacket *packet = new AVPacket;
                    av_new_packet(packet, p_nalu_tail - nalu_buffer);
                    memcpy(packet->data, nalu_buffer, p_nalu_tail - nalu_buffer);

                    decoder_->Decode(packet); // the decoder frees and deletes the packet when done
                }
                p_nalu_tail = nalu_buffer;
                memcpy(p_nalu_tail, start_code, sizeof(start_code));
                p_nalu_tail += sizeof(start_code);
                memcpy(p_nalu_tail, fReceiveBuffer, frameSize);
                p_nalu_tail += frameSize;
            }
        }
        pre_time_stamp = presentationTime;
    }

    // Then continue, to request the next frame of data:
    continuePlaying();
}

Boolean DummySink::continuePlaying()
{
    if (fSource == NULL) return False; // sanity check (should not happen)

    // Request the next frame of data from our input source.  "afterGettingFrame()" will get called later, when it arrives:
    fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
                          afterGettingFrame, this,
                          onSourceClosure, this);
    return True;
}
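
// Queue.h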
#pragma once

#include <queue>
#include <condition_variable>
#include <mutex>

template<class T>
class Queue
{
public:
    explicit Queue(int size = -1): size_(size){}

    void Put(const T &v){
        std::lock_guard<std::mutex> lock(mtx_);
        if(size_ == -1){
            data_queue_.push(v);
        }else{
            if((int)data_queue_.size() >= size_){ // bounded mode: make room by dropping the oldest entry
                data_queue_.pop();
            }
            data_queue_.push(v);
        }
        cv_.notify_one();
    }

    void Quit(){
        std::lock_guard<std::mutex> lock(mtx_);
        quit_ = true;
        cv_.notify_all(); // wake any blocked Pop() so it can observe quit_
    }

    const T Pop(bool sync = true){
        if(sync){
            std::unique_lock<std::mutex> lock(mtx_);
            cv_.wait(lock,[this]{return !data_queue_.empty() || quit_;});
            T d;
            if(quit_)return d;
            d = std::move(data_queue_.front());
            data_queue_.pop();
            return d;
        }else{
            std::lock_guard<std::mutex> lock(mtx_);
            T d;
            if(!data_queue_.empty()){
                d = std::move(data_queue_.front());
                data_queue_.pop();
            }

            return d;
        }
    }

private:
    std::queue<T> data_queue_;
    std::condition_variable cv_;
    std::mutex mtx_;
    int size_ = 0;
    bool quit_ = false;
};
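Queue.h is included by DummySink.cpp but is not exercised in the code shown; a minimal illustration of its bounded-drop semantics (the values here are just for the example):

Queue<int> q(2);               // bounded: keeps at most the 2 newest items
q.Put(1); q.Put(2); q.Put(3);  // Put(3) drops the oldest item (1)
int front = q.Pop(false);      // non-blocking Pop returns 2; Pop(true) blocks until Put() or Quit()

// RtspClientManager.h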
#pragma once
#include <map>
#include <functional>
#include <boost/asio/io_service.hpp>
#include <thread>
extern "C"
{
#include "libavformat/avformat.h"
}
#include "MultiRTSPClient.h"
#include "Queue.h"
#include "DecoderFactory.h"

class RTSPClientManager
{
public:
    explicit RTSPClientManager(const int decod_thr_num);
    virtual ~RTSPClientManager();

    void RunEventLoop();
    void OpenRtsp(const std::string &rtsp_url, const DecoderFactory::DecoderType t, const std::function<void(const AVPixelFormat format, const u_char *data, const int width, const int height)> handler);
    void ShutDownClient(const std::string &rtsp_url);

private:
    volatile char eventLoopWatchVariable_ = 0;
    TaskScheduler *scheduler_;
    UsageEnvironment *env_;
    std::mutex handler_mtx_;

    int decode_thr_num_;
    volatile bool thread_pool_quit_ = false;
    DecoderFactory *decoder_factory_;
    std::map<std::string,RTSPClient*> client_map_;
    std::vector<std::thread> decod_threads_vec_;
};
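
// RtspClientManager.cpp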
#include <iostream>
#include "RtspClientManager.h"

RTSPClientManager::RTSPClientManager(const int decod_thr_num)
{
    decode_thr_num_ = decod_thr_num;
    scheduler_ = BasicTaskScheduler::createNew();
    env_ = BasicUsageEnvironment::createNew(*scheduler_);
    decoder_factory_ = new DecoderFactory;
    decoder_factory_->Initsize(decod_thr_num);
}

RTSPClientManager::~RTSPClientManager()
{
    env_->reclaim(); env_ = NULL;
    delete scheduler_; scheduler_ = NULL;
    delete decoder_factory_;
}

void RTSPClientManager::RunEventLoop()
{
    env_->taskScheduler().doEventLoop(&eventLoopWatchVariable_);
}

void RTSPClientManager::OpenRtsp(const std::string &rtsp_url, const DecoderFactory::DecoderType t, const std::function<void (const AVPixelFormat, const u_char *, const int, const int)> handler)
{
    // One decoder per RTSP stream so that SPS/PPS stay matched to their own stream; otherwise frames fail to decode.
    std::lock_guard<std::mutex> lock(handler_mtx_);
    if(client_map_.find(rtsp_url) != client_map_.end())return;
    MultiRTSPClient *client = new MultiRTSPClient(*env_, rtsp_url.data(), 1, "multi_rtsp_client");
    client->decoder_ = decoder_factory_->MakeDecoder(t);
    if(client->decoder_ == nullptr){ // plugin failed to load; don't start the client
        Medium::close(client);
        return;
    }
    client->decoder_->SetFrameCallBack(handler);
    client->sendDescribeCommand(continueAfterDESCRIBE);
    client_map_.insert(std::make_pair(rtsp_url, client));
}

void RTSPClientManager::ShutDownClient(const std::string &rtsp_url)
{
    std::lock_guard<std::mutex> lock(handler_mtx_);
    auto iter = client_map_.find(rtsp_url);
    if(iter == client_map_.end())return;
    shutdownStream(iter->second);
    client_map_.erase(iter);
}
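
// TaskManager.h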
#pragma once
#include <thread>
#include "boost/asio/io_service.hpp"

class TaskManager
{
public:
    explicit TaskManager(int thr_num);
    boost::asio::io_service& GetIOService(){return service_;}
    virtual ~TaskManager();

private:
    boost::asio::io_service service_;
    boost::asio::io_service::work work_;
    std::vector<std::thread> threads_;
};
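
// TaskManager.cpp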
#include "TaskManager.h"

TaskManager::TaskManager(int thr_num):
    service_(thr_num),work_(service_)
{
    for(int i = 0; i < thr_num; i++){
        threads_.emplace_back(std::thread([this]{
            service_.run();
        }));
    }
}

TaskManager::~TaskManager()
{
    service_.stop();
    for(auto iter = threads_.begin(); iter != threads_.end();)
    {
        if(iter->joinable()){
            iter->join();
        }
        iter = threads_.erase(iter);
    }
}
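
TaskManager is a plain boost::asio thread pool: work posted to its io_service runs on one of the thr_num threads, which is how both decoder plugins parallelize decoding. A minimal sketch:

TaskManager tasks(4);              // 4 threads blocked in io_service::run()
tasks.GetIOService().post([]{
    // runs on one of the pool threads, exactly like the decoders' Decode() lambdas
});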

 The code above wraps live555. Below is the decoding module, split into software (FFmpeg) decoding and NVIDIA hardware decoding.
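
// FfmpegDecoder.h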

#pragma once

#include <atomic>
#include <mutex>
#include "Decoder.h"
#include "TaskManager.h"

class FfmpegDecoder : public Decoder
{
public:
    explicit FfmpegDecoder(TaskManager* t);
    ~FfmpegDecoder();

    bool Initsize(const AVCodecID codec, std::string &msg) override;
    virtual void SetFrameCallBack(const std::function<void(const AVPixelFormat format, const u_char *data, const int width, const int height)> frameHandler) override;
    void Decode(const AVPacket *pkt) override;

private:
    TaskManager *taskManager_;
    static std::atomic_bool ffmpeg_inited_;
    AVFrame *decoded_frame_ = nullptr;
    AVCodec *codec_ = nullptr;
    AVCodecContext *codec_context_ = nullptr; // null until Initsize() succeeds

    std::mutex decod_mtx_;
    u_char* frame_buffer_ = nullptr;
    int video_width_;
    int video_height_;
    bool is_first_frame_ = true;
    std::function<void(const AVPixelFormat format, const u_char *data, const int width, const int height)> frame_Hander_;
};
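
// FfmpegDecoder.cpp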
#include <iostream>
#include "FfmpegDecoder.h"

std::atomic_bool FfmpegDecoder::ffmpeg_inited_{false};
FfmpegDecoder::FfmpegDecoder(TaskManager *t)
{
    taskManager_ = t;
}

FfmpegDecoder::~FfmpegDecoder()
{
    if(frame_buffer_){
        av_free(frame_buffer_); // allocated with av_malloc() in Decode()
    }
    if(decoded_frame_){
        av_frame_free(&decoded_frame_);
    }
    if(codec_context_){
        avcodec_free_context(&codec_context_);
    }
}

bool FfmpegDecoder::Initsize(const AVCodecID codec, std::string &msg)
{
    if(!ffmpeg_inited_.exchange(true)){ // register codecs exactly once, even if two decoders race here
        avcodec_register_all();
    }

    decoded_frame_ = av_frame_alloc();
    codec_ = avcodec_find_decoder(codec);
    if(!codec_){
        msg.assign("codec not found");
        return false;
    }
    codec_context_ = avcodec_alloc_context3(codec_);
    if(avcodec_open2(codec_context_, codec_, nullptr) < 0){
        msg.assign("could not open codec");
        return false;
    }

    return true;
}

void FfmpegDecoder::SetFrameCallBack(const std::function<void (const AVPixelFormat, const u_char *, const int, const int)> frameHandler)
{
    frame_Hander_ = frameHandler;
}

void FfmpegDecoder::Decode(const AVPacket *pkt)
{
    std::cout << pkt->size << std::endl; // debug: incoming packet size
    taskManager_->GetIOService().post([=]{
        int frameFinished = 0;
        std::lock_guard<std::mutex> lock(decod_mtx_);
        AVPacket *packet = const_cast<AVPacket*>(pkt); // we own the packet and release it on every path
        int resCode = avcodec_decode_video2(codec_context_, decoded_frame_, &frameFinished, packet);
        if(resCode < 0){ // a negative return means a decode error
            std::cout << "decode packet error: " << std::to_string(resCode) << std::endl;
            av_free_packet(packet);
            delete packet;
            return;
        }

        if(!frameFinished){ // no complete frame yet; keep feeding packets
            av_free_packet(packet);
            delete packet;
            return;
        }

        if(is_first_frame_){
            video_width_ = codec_context_->width;
            video_height_ = codec_context_->height;
            int numBytes = avpicture_get_size(codec_context_->pix_fmt, video_width_, video_height_);
            frame_buffer_ = (u_char*)av_malloc(numBytes * sizeof(uint8_t));
            is_first_frame_ = false;
        }

        int bytes = 0; // YUV420P has three planes to copy (NV12 would have two)
        for(int i = 0; i < video_height_; i++){ // copy the Y plane row by row (linesize may exceed width)
            ::memcpy(frame_buffer_ + bytes, decoded_frame_->data[0] + decoded_frame_->linesize[0] * i, video_width_);
            bytes += video_width_;
        }
        int uv = video_height_ >> 1;
        for(int i = 0; i < uv; i++){ // copy the U plane
            ::memcpy(frame_buffer_ + bytes, decoded_frame_->data[1] + decoded_frame_->linesize[1] * i, video_width_ >> 1);
            bytes += video_width_ >> 1;
        }
        for(int i = 0; i < uv; i++){ // copy the V plane
            ::memcpy(frame_buffer_ + bytes, decoded_frame_->data[2] + decoded_frame_->linesize[2] * i, video_width_ >> 1);
            bytes += video_width_ >> 1;
        }
        if(frame_Hander_){
            frame_Hander_(codec_context_->pix_fmt, frame_buffer_, video_width_, video_height_);
        }
        av_free_packet(packet);
        delete packet;
    });
}
extern "C"
Decoder* createDecoder(TaskManager* t)
{
    return new FfmpegDecoder(t);
}
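
// NvidiaDecoder.h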
#ifndef NVIDIADECODER_H
#define NVIDIADECODER_H

#include <vector>
#include <mutex>
#include "Utils/FFmpegDemuxer.h"
#include "NvDecoder.h"
#include "Decoder.h"
#include "TaskManager.h"

class NvidiaDecoder : public Decoder
{
public:
    explicit NvidiaDecoder(TaskManager* t);
    ~NvidiaDecoder();

    bool Initsize(const AVCodecID codec,std::string &error) override;

    void SetFrameCallBack(const std::function<void(const AVPixelFormat format, const u_char *data, const int width, const int height)> frameHandler) override;

    void Decode(const AVPacket *pkt) override;

private:
    NvDecoder *m_nvdecod{nullptr};

    TaskManager *taskManager_;
    std::mutex decod_mtx_;
    u_char* frame_buffer_;
    int video_width_;
    int video_height_;
    bool is_first_frame_ = true;
    std::function<void(const AVPixelFormat format, const u_char *data, const int width, const int height)> frame_Hander_;
};

#endif // NVIDIADECODER_H
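
// NvidiaDecoder.cpp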
#include "NvidiaDecoder.h"

simplelogger::Logger *logger = simplelogger::LoggerFactory::CreateConsoleLogger();
bool isInitsized = false;
int gcurIndex = 0;
std::mutex gmtx;
std::vector<std::pair<CUcontext,std::string>> m_ctxV;
NvidiaDecoder::NvidiaDecoder(TaskManager *t)
{
    taskManager_ = t;
}

NvidiaDecoder::~NvidiaDecoder()
{
    if(m_nvdecod)
        delete m_nvdecod;
}

void NvidiaDecoder::Decode(const AVPacket *pkt)
{
    if(m_ctxV.empty()){ // no usable CUDA context; drop the packet to avoid leaking it
        av_free_packet(const_cast<AVPacket*>(pkt));
        delete pkt;
        return;
    }

    taskManager_->GetIOService().post([=]{
        AVPacket *packet = const_cast<AVPacket*>(pkt); // we own the packet and release it below
        try{
            std::lock_guard<std::mutex> lock(decod_mtx_);
            int nFrameReturned = 0;
            uint8_t **ppFrame;
            m_nvdecod->Decode(packet->data, packet->size, &ppFrame, &nFrameReturned);
            if (!nFrameReturned)
                LOG(INFO) << m_nvdecod->GetVideoInfo();

            for (int i = 0; i < nFrameReturned; i++) {
                if (m_nvdecod->GetBitDepth() == 8){
                    frame_buffer_ = ppFrame[i]; // NV12 frame owned by NvDecoder; valid until the next Decode()
                    if (frame_Hander_)
                        frame_Hander_(AV_PIX_FMT_NV12, frame_buffer_, m_nvdecod->GetWidth(), m_nvdecod->GetHeight());
                }else{
                    // >8-bit output would need a conversion such as P016ToBgra32() first
                }
            }
        }catch(std::exception &e){
            std::cout << __func__ << ":" << e.what() << std::endl;
        }
        av_free_packet(packet); // release the packet on every path, as the FFmpeg plugin does
        delete packet;
    });
}

void NvidiaDecoder::SetFrameCallBack(const std::function<void (const AVPixelFormat, const u_char *, const int, const int)> frameHandler)
{
    frame_Hander_ = frameHandler;
}

bool NvidiaDecoder::Initsize(const AVCodecID codec, std::string &error)
{
    std::lock_guard<std::mutex> glock(gmtx); // also protects gcurIndex below
    if(!isInitsized){
        ck(cuInit(0));
        int nGpu = 0;
        ck(cuDeviceGetCount(&nGpu));
        for(int i = 0; i < nGpu; i++){
            CUdevice cuDevice = 0;
            ck(cuDeviceGet(&cuDevice, i));
            char szDeviceName[80];
            ck(cuDeviceGetName(szDeviceName, sizeof(szDeviceName), cuDevice));
            CUcontext cuContext = NULL;
            ck(cuCtxCreate(&cuContext, CU_CTX_SCHED_BLOCKING_SYNC, cuDevice));
            LOG(INFO) << "Find Gpu: " << szDeviceName << std::endl;

            CUVIDDECODECAPS videoDecodeCaps = {};
            videoDecodeCaps.eCodecType = cudaVideoCodec_H264;
            videoDecodeCaps.eChromaFormat = cudaVideoChromaFormat_420;
            videoDecodeCaps.nBitDepthMinus8 = 0;
            CUresult resCode;
            if ((resCode = cuvidGetDecoderCaps(&videoDecodeCaps)) == CUDA_SUCCESS){
                LOG(INFO) << "cuvid Decoder Caps nMaxWidth " << videoDecodeCaps.nMaxWidth << " nMaxHeigth " << videoDecodeCaps.nMaxHeight << std::endl;
                if(videoDecodeCaps.nMaxWidth >= 1920 && videoDecodeCaps.nMaxHeight >= 1080){
                    m_ctxV.push_back({cuContext,szDeviceName});
                }
            }else{
                LOG(INFO) << "cuvidGetDecoderCaps failed, Code: " << resCode << std::endl;
            }
        }
        isInitsized = true;
        LOG(INFO) << "nvidia decoder initsized end " << isInitsized << " ptr is " << &isInitsized << std::endl;
    }

    if(m_ctxV.empty()){
        error = "no context for this width and height";
        return false;
    }


    std::pair<CUcontext,std::string> v = m_ctxV.at(gcurIndex++ % m_ctxV.size()); // round-robin across the usable GPUs
    std::cout << "Use context on " << v.second << ", " << m_ctxV.size() << " context(s) total" << std::endl;
    m_nvdecod = new NvDecoder(v.first, false, FFmpeg2NvCodecId(codec), nullptr);
    return true;
}

extern "C"
Decoder* createDecoder(TaskManager* t)
{
    return new NvidiaDecoder(t);
}

   NvDecoder is a class from NVIDIA's official Video Codec SDK, so the SDK-side code is not reproduced here.
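For orientation, this is the subset of NvDecoder's interface that the wrapper above relies on, reconstructed from the calls in NvidiaDecoder.cpp; the real header in the Video Codec SDK samples has more parameters and members:

class NvDecoder {
public:
    NvDecoder(CUcontext cuContext, bool bUseDeviceFrame, cudaVideoCodec eCodec, std::mutex *pMutex);
    bool Decode(const uint8_t *pData, int nSize, uint8_t ***pppFrame, int *pnFrameReturned); // decoded NV12/P016 frames come back via pppFrame
    int GetWidth();
    int GetHeight();
    int GetBitDepth();
    std::string GetVideoInfo();
};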

A usage example follows.
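
// main.cpp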

#include <iostream>
#include "opencv2/opencv.hpp"
#include "RtspClientManager.h"

int main()
{
    RTSPClientManager rtspManager(6);
    std::thread o_thread([&]{
        std::this_thread::sleep_for(std::chrono::seconds(4)); // give the event loop below a moment to start
        rtspManager.OpenRtsp("rtsp://192.168.2.253:8554/test.264", DecoderFactory::Ffmpeg, [](const AVPixelFormat format, const u_char *data, const int width, const int height){
            std::cout << "format:" << format << " width: " << width << " height:" << height << std::endl;
            if(format == AV_PIX_FMT_YUV420P){
                cv::Mat frame(3 * height / 2, width, CV_8UC1, (void*)data);
                cv::cvtColor(frame, frame, CV_YUV2BGR_I420); // CPU-heavy: about 30% of a 6-core i5 at 384x288
                cv::imshow("small v" ,frame);
                cv::waitKey(1);
            }else if(format == AV_PIX_FMT_NV12)
            {
                cv::Mat frame(3 * height / 2, width, CV_8UC1, (void*)data);
                cv::cvtColor(frame, frame, CV_YUV2BGR_NV12); // CPU-heavy: about 30% of a 6-core i5 at 384x288
                cv::imshow("small v" ,frame);
                cv::waitKey(1);
            }
        });
        rtspManager.OpenRtsp("rtsp://192.168.2.38:5554/2", DecoderFactory::Nvidia, [](const AVPixelFormat format, const u_char *data, const int width, const int height){
            std::cout << "format:" << format << " width: " << width << " height:" << height << std::endl;
            if(format == AV_PIX_FMT_YUVJ420P){
                cv::Mat frame(3 * height / 2, width, CV_8UC1, (void*)data);
                cv::cvtColor(frame, frame, CV_YUV2BGR_I420); // CPU-heavy: about 30% of a 6-core i5 at 384x288
                cv::imshow("big v" ,frame);
                cv::waitKey(1);
            }else if(format == AV_PIX_FMT_NV12)
            {
                cv::Mat frame(3 * height / 2, width, CV_8UC1, (void*)data);
                cv::cvtColor(frame, frame, CV_YUV2BGR_NV12); // CPU-heavy: about 30% of a 6-core i5 at 384x288
                cv::imshow("big v" ,frame);
                cv::waitKey(1);
            }
        });
//        std::this_thread::sleep_for(std::chrono::seconds(5));
//        rtspManager.ShutDownClient("rtsp://192.168.2.253:8554/test.264");
    });
    o_thread.detach(); // RunEventLoop() below never returns, so detach instead of joining
    rtspManager.RunEventLoop();

    return 0;
}

 
