ffmpeg 筆記:mp4視頻截圖

方案一:使用命令行處理

命令1

ffmpeg -ss 00:00:05  -i 123.mp4  -y  -t 3 -r 1 -f image2 "123-%d.jpg"
  • -ss:代表跳轉到指定時間開始截圖
  • -i:代表輸入視頻路徑
  • -y:靜默確定
  • -t:代表需要截圖多少時間
  • -r:代表截圖輸出幀率
  • -f:代表輸出圖片格式,這裏使用image2格式
    "123-%d.jpg"代表輸出圖片地址格式化方式
    其實對於這個例子幀率爲1就是1s輸出1幀,然後截圖3秒,所以最終會輸出3張圖片

這個方案特別要注意的一點是一定要把 -ss 寫到第一個參數的位置,這樣內部會直接跳到這個時間開始截圖,性能大大提高

命令2

ffmpeg -i "123.mp4" -vf "select='eq(n\,10)+eq(n\,20)'" -vsync 0 "PIC_%d.jpg"

這個命令代表截取視頻的第10幀和第20幀圖片,並以 "PIC_%d.jpg" 格式化存儲

方案二:使用ffmpeg C語言SDK編程處理

我這裏使用的是我最習慣的ffmpeg 3.4.1的版本,帶C++ 11一些語法
代碼中附上幾個小輪子,不夠短,但求清晰

直接貼上封裝的函數,有基礎的直接拿去使用;沒基礎的可以下載我後續放出的工程例子 Demo.rar

#include <stdio.h>
#include <cstdarg>
#include <cstring>
#include <functional>
#include <string>
#include <vector>

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
};
// Raw picture buffer holding one frame in planar YUV420p layout.
typedef struct
{
	unsigned int iWidth;        // picture width in pixels
	unsigned int iHeight;       // picture height in pixels
	unsigned int iReserveSize;  // number of valid bytes in pData
	char *pData;                // YUV pixel data (Y plane, then U, then V)
} ST_PIC_DATA;

// Per-frame callback: (1-based frame index within the request, picture data).
// Declared after ST_PIC_DATA — the original declared it first and referenced
// an as-yet-undefined type, which does not compile.
typedef std::function<void(int, ST_PIC_DATA*)> CALLBACK_YUV;

bool YUV2JPEG(const char * jpgpath, ST_PIC_DATA *yuv_data)
	{
		AVFormatContext* pFormatCtx;
		AVOutputFormat* fmt;
		AVStream* video_st;
		AVCodecContext* pCodecCtx;
		AVCodec* pCodec;

		uint8_t* picture_buf =NULL;
		AVFrame* picture;
		AVPacket pkt;
		int y_size;
		int got_picture = 0;
		int size;

		int ret = 0;

		//YUV source  
		int in_w = yuv_data->iWidth, in_h = yuv_data->iHeight;                           //YUV's width and height  
		const char* out_file = jpgpath;    //Output file  



		av_register_all();

		//Method 1  
		pFormatCtx = avformat_alloc_context();
		//Guess format  
		fmt = av_guess_format("mjpeg", NULL, NULL);
		pFormatCtx->oformat = fmt;
		//Output URL  
		if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0){
			printf("Couldn't open output file.");
			return -1;
		}

		//Method 2. More simple  
		//avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);  
		//fmt = pFormatCtx->oformat;  

		video_st = avformat_new_stream(pFormatCtx, 0);
		if (video_st == NULL){
			return -1;
		}
		pCodecCtx = video_st->codec;
		pCodecCtx->codec_id = fmt->video_codec;
		pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
		pCodecCtx->pix_fmt = AV_PIX_FMT_YUVJ420P;

		pCodecCtx->width = in_w;
		pCodecCtx->height = in_h;

		pCodecCtx->time_base.num = 1;
		pCodecCtx->time_base.den = 25;
		//Output some information  
		av_dump_format(pFormatCtx, 0, out_file, 1);

		pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
		if (!pCodec){
			printf("Codec not found.");
			return -1;
		}
		if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
			printf("Could not open codec.");
			return -1;
		}
		picture = av_frame_alloc();
		size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
		picture_buf = (uint8_t *)av_malloc(size);
		if (!picture_buf)
		{
			return -1;
		}
		avpicture_fill((AVPicture *)picture, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

		//Write Header  
		avformat_write_header(pFormatCtx, NULL);

		y_size = pCodecCtx->width * pCodecCtx->height;
		av_new_packet(&pkt, y_size * 3);

		memcpy(picture_buf, (uint8_t*)yuv_data->pData, yuv_data->iReserveSize);
		 

		picture->data[0] = picture_buf;              // Y  
		picture->data[1] = picture_buf + y_size;      // U   
		picture->data[2] = picture_buf + y_size * 5 / 4;  // V  

		//Encode  
		ret = avcodec_encode_video2(pCodecCtx, &pkt, picture, &got_picture);
		if (ret < 0){
			printf("Encode Error.\n");
			return -1;
		}
		if (got_picture == 1){
			pkt.stream_index = video_st->index;
			ret = av_write_frame(pFormatCtx, &pkt);
		}

		av_free_packet(&pkt);
		//Write Trailer  
		av_write_trailer(pFormatCtx);

		printf("Encode Successful.\n");

		if (video_st){
			avcodec_close(video_st->codec);
			av_free(picture);
			av_free(picture_buf);
		}
		avio_close(pFormatCtx->pb);
		avformat_free_context(pFormatCtx);



		return 0;
}

bool SaveVideoYUV(const char * inputFileName, std::vector<int> &frameVec, CALLBACK_YUV pFunc)
	{
		
		AVFormatContext *pFormatCtx = NULL;
		int i, videoStream;
		AVCodecContext *pCodecCtxOrig = NULL;
		AVCodecContext *pCodecCtx = NULL;
		AVCodec *pCodec = NULL;
		AVFrame *pFrame = NULL;
		AVFrame *pFrameYUV = NULL;
		int numBytes;
		uint8_t *out_buffer = NULL;

		int ret, got_picture;
		struct SwsContext *sws_ctx = NULL;

		// Open video file.
		if (avformat_open_input(&pFormatCtx, inputFileName, NULL, NULL) != 0) {
			return false; // Couldn't open file.
		}

		// Retrieve stream information.
		if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
			return false; // Couldn't find stream information.
		}

		// Dump information about file onto standard error.
#ifdef DEBUG
		av_log_set_level(AV_LOG_DEBUG);
		av_dump_format(pFormatCtx, 0, inputFileName, 0);
		av_log_set_level(AV_LOG_ERROR);
#endif // DEBUG

		// Find the first video stream.
		videoStream = -1;
		for (i = 0; i < pFormatCtx->nb_streams; i++) {
			if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
				videoStream = i;
				break;
			}
		}
		if (videoStream == -1) {
			return false; // Didn't find a video stream.
		}


		AVStream *avstream = pFormatCtx->streams[videoStream];

		int frame_rate = avstream->avg_frame_rate.num / avstream->avg_frame_rate.den;//每秒多少幀
		int frame_total = avstream->nb_frames;//總幀數

		// Get a pointer to the codec context for the video stream.
		pCodecCtxOrig = avstream->codec;
		// Find the decoder for the video stream.
		pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
		if (pCodec == NULL) {
			fprintf(stderr, "Unsupported codec!\n");
			return false; // Codec not found.
		}
		// Copy context.
		pCodecCtx = avcodec_alloc_context3(pCodec);
		if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
			fprintf(stderr, "Couldn't copy codec context");
			return false; // Error copying codec context.
		}

		// Open codec.
		if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
			return false; // Could not open codec.
		}


		//pCodecCtx->gop_size
		// Allocate video frame.
		pFrame = av_frame_alloc();

		// Allocate an AVFrame structure.
		pFrameYUV = av_frame_alloc();
		if (pFrameYUV == NULL) {
			return -1;
		}

		// Determine required buffer size and allocate buffer.
		//numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height); // Deprecated.

		out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
		avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
		sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);



		int gop_size = 0;//下面這一波是爲了計算出關鍵幀間隔
		AVPacket readPkt;
		memset(&readPkt, 0, sizeof(readPkt));
		av_init_packet(&readPkt);
		while (1) {
			av_packet_unref(&readPkt);
			ret = av_read_frame(pFormatCtx, &readPkt);
			if (ret == AVERROR(EAGAIN))
				continue;
			if (readPkt.stream_index == videoStream){
				if (readPkt.flags&AV_PKT_FLAG_KEY)
				{
					if (gop_size > 0)break;
				}
				gop_size++;
			}
		}
		av_packet_unref(&readPkt);


		for (int index = 0; index < frameVec.size(); index++)
		{
			int frameNum = frameVec[index];
			int timestamp = (frameNum / gop_size)*gop_size / frame_rate;//計算快進值
			int yu = frameNum%gop_size;//計算截圖編碼幀

			//timestamp = 0;
			//yu = frameNum;


			int64_t time = (int64_t)(timestamp / av_q2d(avstream->time_base));

			av_seek_frame(pFormatCtx, videoStream, time, AVSEEK_FLAG_BACKWARD);
			//av_seek_frame(pFormatCtx, videoStream, timestamp * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
			avcodec_flush_buffers(pCodecCtx);
			// Read frames and save first five frames to disk.
			int  num = 0;

			AVPacket readPkt2;
			memset(&readPkt2, 0, sizeof(readPkt2));
			av_init_packet(&readPkt2);

	
			while (1) {
				av_packet_unref(&readPkt2);
				ret = av_read_frame(pFormatCtx, &readPkt2);

				if (ret == AVERROR(EAGAIN))
					continue;

				if (readPkt2.stream_index == videoStream) {
					// Decode video frame
					//if (packet.flags)

					ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &readPkt2);
					if (ret < 0){
						printf("Decode Error.\n");
						return false;
					}
					//printf("num=%d,got_picture=%d\r\n", num, got_picture);

					if (got_picture&& ++num == yu) {

						sws_scale(sws_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
							pFrameYUV->data, pFrameYUV->linesize);

						ST_PIC_DATA* yuvdata = single_ffmpeg_helper.ConvertAVFrameToYUV420p(pFrameYUV, pCodecCtx->width, pCodecCtx->height);
						//TODO SAVE
						if (pFunc != NULL)pFunc(index + 1, yuvdata);
						free(yuvdata);
						break;
					}
				}
				// Free the packet that was allocated by av_read_frame.
				//av_free_packet(&packet); // Deprecated.
				av_packet_unref(&readPkt2);
			}
		}
		// Free the RGB image.
		av_free(out_buffer);
		av_frame_free(&pFrameYUV);

		// Free the YUV frame.
		av_frame_free(&pFrame);

		// Close the codecs.
		avcodec_close(pCodecCtx);
		avcodec_close(pCodecCtxOrig);

		// Close the video file.
		avformat_close_input(&pFormatCtx);
		return true;
	}
/**
 * printf-style formatting into a std::string (output truncated at 1023 chars).
 *
 * @param str    receives the formatted text on success
 * @param format printf format string followed by its arguments
 * @return the value of vsnprintf: number of characters that would have been
 *         written (excluding the terminator), or a negative value on error.
 *
 * Fixes over the original: `ret` was returned uninitialized when malloc
 * failed (undefined behavior), and vsprintf_s is MSVC-only — vsnprintf is
 * standard C++11 and needs no heap allocation.
 */
int Format(std::string &str, const char* format, ...)
{
	char buf[1024];
	va_list ap;
	va_start(ap, format);
	int ret = vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	if (ret >= 0)
		str = buf;   // buf is always NUL-terminated by vsnprintf
	return ret;
}


int main(){

    std::vector<int> ff;
	ff.push_back(25);
	ff.push_back(30);

	std::string path;
	ffDec.SaveVideoYUV("D:\\123.mp4", ff, [&](int frameindex, ST_PIC_DATA * yuvdata){
		Format(path, "D:\\123_PIC%02d.jpg", frameindex);
		YUV2JPEG(path.data(), yuvdata);
	});

}

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章