Simple Kinect Foreground Extraction


    This is the first step when doing 3D scanning with a Kinect: extracting the foreground region we are interested in. As everyone knows, the Kinect's defining feature is that it captures depth data, and with depth data the foreground can be separated from the background.
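    For readers who just want the core idea, here is a minimal sketch of the same depth-thresholding trick written against the modern OpenCV C++ API (the full program below uses OpenNI plus the old C API instead). The function name maskForeground, its parameters, and the assumption that the depth map is a registered CV_16UC1 image in millimetres are mine for illustration only.

// Minimal sketch, not the original program: keep only the colour pixels whose
// depth falls inside a given band (e.g. 1000-1500 mm), assuming a CV_16UC1
// depth map in millimetres registered to a CV_8UC3 colour frame of the same size.
#include <opencv2/opencv.hpp>

cv::Mat maskForeground(const cv::Mat& color, const cv::Mat& depth,
                       int nearMM = 1000, int farMM = 1500)
{
    cv::Mat mask;
    cv::inRange(depth, cv::Scalar(nearMM), cv::Scalar(farMM), mask); // 255 inside the band, 0 elsewhere
    cv::Mat foreground = cv::Mat::zeros(color.size(), color.type());
    color.copyTo(foreground, mask);                                  // copy only the masked pixels
    return foreground;
}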

    To keep things short, here is a screenshot of the result first.

 

And now the source code:

 

/***************************************
* Description:This program rebuilds
* models by scanning objects with a Kinect
*
* Methods of application:
* 
*
* Author:Lixam
* Time:2012-11
****************************************/
#include "stdafx.h"

#include <XnCppWrapper.h> 
#include <opencv/highgui.h>
#include <opencv/cv.h>
#include <vector>
using namespace cv;
using namespace xn;
using namespace std;


#define IMAGEWIDTH 640
#define IMAGEHEIGTH 480

typedef struct 
{
	unsigned char R;
	unsigned char G;
	unsigned char B;
}RGBPIXEL;

typedef struct 
{
	unsigned char data_L;	//Low byte
	unsigned char data_H;	//High byte
}DEPTHPIXEL;

//Generator  
ImageGenerator m_imageGenerator;
DepthGenerator m_depthGenerator;
Context m_context;

/*************************************************************************
 *
 * GetROIdata()
 *
 * Parameters:
 *
 * IplImage* image		- Source of color image data
 * IplImage* depth		- Source of depth image data
 * vector<RGBPIXEL>& ROIimage	- Color image data that we are interested in
 * vector<DEPTHPIXEL>& ROIdepth	- Depth data that we are interested in
 * vector<CvPoint>& ROIimagePoint	- Coordinates of ROI points 
 *
 * Return:
 *
 * void				
 * 
 * Description:
 *
 * This function extracts the data we are interested in from the image and depth sources
 * 
 ************************************************************************/
void GetROIdata(IplImage* image,IplImage* depth,
				vector<RGBPIXEL>& ROIimage,vector<DEPTHPIXEL>& ROIdepth,
				vector<CvPoint>& ROIimagePoint)
{
	RGBPIXEL rgbData;
	DEPTHPIXEL depthData;
	CvPoint point;
	for(int j = 0;j < IMAGEHEIGTH;j++)	//We only keep pixels whose depth ranges from 1000mm to 1500mm
			for(int i = 0;i < IMAGEWIDTH;i++)
	if(CV_IMAGE_ELEM(depth,unsigned char,j,i*2)+(CV_IMAGE_ELEM(depth,unsigned char,j,i*2+1)<<8) >= 1000
		&& CV_IMAGE_ELEM(depth,unsigned char,j,i*2)+(CV_IMAGE_ELEM(depth,unsigned char,j,i*2+1)<<8) <= 1500)
	{
		rgbData.R = CV_IMAGE_ELEM(image,unsigned char,j,i*3);	//Get image data
		rgbData.G = CV_IMAGE_ELEM(image,unsigned char,j,i*3 + 1);
		rgbData.B = CV_IMAGE_ELEM(image,unsigned char,j,i*3 + 2);
		
		depthData.data_L = CV_IMAGE_ELEM(depth,unsigned char,j,i*2);	//Get depth data
		depthData.data_H = CV_IMAGE_ELEM(depth,unsigned char,j,i*2 + 1);

		point.x = i;
		point.y = j;
		ROIimagePoint.push_back(point);
		ROIimage.push_back(rgbData);
		ROIdepth.push_back(depthData);
	}
}

/**********Test function**************/
void ShowROIimage(vector<RGBPIXEL> ROIimage1,vector<RGBPIXEL> ROIimage2,
				  vector<CvPoint> ROIimagePoint1,vector<CvPoint> ROIimagePoint2)
{
	IplImage* image1 = cvCreateImage(cvSize(IMAGEWIDTH,IMAGEHEIGTH),IPL_DEPTH_8U,3);
	IplImage* image2 = cvCreateImage(cvSize(IMAGEWIDTH,IMAGEHEIGTH),IPL_DEPTH_8U,3);
	cvZero(image1);
	cvZero(image2);
	vector<CvPoint>::iterator pointItr;
	vector<RGBPIXEL>::iterator rgbItr;
	
	if(ROIimagePoint1.size()&&ROIimagePoint2.size())
	{
		/***Process first picture****/
		for(pointItr=ROIimagePoint1.begin(),rgbItr=ROIimage1.begin();
			pointItr != ROIimagePoint1.end() && rgbItr != ROIimage1.end();
			pointItr++,rgbItr++)
		{
			CV_IMAGE_ELEM(image1,unsigned char,(*pointItr).y,(*pointItr).x*3) = (*rgbItr).R;
			CV_IMAGE_ELEM(image1,unsigned char,(*pointItr).y,(*pointItr).x*3+1) = (*rgbItr).G;
			CV_IMAGE_ELEM(image1,unsigned char,(*pointItr).y,(*pointItr).x*3+2) = (*rgbItr).B;
		}//end for
		/***Process second picture****/
		for(pointItr=ROIimagePoint2.begin(),rgbItr=ROIimage2.begin();
			pointItr != ROIimagePoint2.end() && rgbItr != ROIimage2.end();
			pointItr++,rgbItr++)
		{
			CV_IMAGE_ELEM(image2,unsigned char,(*pointItr).y,(*pointItr).x*3) = (*rgbItr).R;
			CV_IMAGE_ELEM(image2,unsigned char,(*pointItr).y,(*pointItr).x*3+1) = (*rgbItr).G;
			CV_IMAGE_ELEM(image2,unsigned char,(*pointItr).y,(*pointItr).x*3+2) = (*rgbItr).B;
		}//end for
	}//end if
	/********show images********/
	cvCvtColor(image1,image1,CV_RGB2BGR);
	cvCvtColor(image2,image2,CV_RGB2BGR);
	cvShowImage("image1",image1);
	cvShowImage("image2",image2);
}

int main(int argc, _TCHAR* argv[])
{
	XnStatus res;
	vector<RGBPIXEL> ROIimage1,ROIimage2;
	vector<DEPTHPIXEL> ROIdepth1,ROIdepth2;
	vector<CvPoint> ROIimagePoint1,ROIimagePoint2;

	/**********Initialize the OpenNI context and generators********/
	res = m_context.Init();
	res = m_imageGenerator.Create(m_context);
	res = m_depthGenerator.Create(m_context);

	ImageMetaData ImageMap;
	DepthMetaData DepthMap;
	IplImage* depthImg = cvCreateImage(cvSize(IMAGEWIDTH,IMAGEHEIGTH),IPL_DEPTH_16U,1);
	IplImage* image = cvCreateImage(cvSize(IMAGEWIDTH,IMAGEHEIGTH),IPL_DEPTH_8U,3);


	//Set output mode
	XnMapOutputMode mapMode;
	mapMode.nXRes = IMAGEWIDTH;    
    mapMode.nYRes = IMAGEHEIGTH;   
    mapMode.nFPS = 30;   
    res = m_imageGenerator.SetMapOutputMode(mapMode);
	res = m_depthGenerator.SetMapOutputMode(mapMode);

	/*******Test codes*************/
	cvNamedWindow("image1",CV_WINDOW_AUTOSIZE);
	cvNamedWindow("image2",CV_WINDOW_AUTOSIZE);
	/*********/
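	//Register the depth view point to the color camera so depth and RGB pixels line up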
	m_depthGenerator.GetAlternativeViewPointCap().SetViewPoint(m_imageGenerator);

	//Generate data
    m_context.StartGeneratingAll();  
    res = m_context.WaitAndUpdateAll(); 
    int imageCount = 0;	//Frame counter
    while(!m_context.WaitAndUpdateAll())	  
    {   
		res = m_context.WaitAndUpdateAll();  
		m_imageGenerator.GetMetaData(ImageMap);
		m_depthGenerator.GetMetaData(DepthMap);
		memcpy(image->imageData,ImageMap.Data(),IMAGEWIDTH*IMAGEHEIGTH*3);
		memcpy(depthImg->imageData,DepthMap.Data(),IMAGEWIDTH*IMAGEHEIGTH*2);
		/*************Get two pictures captured at different times**********/
		if(0 == imageCount)
		{
			/*************Get interested rect*************/
			GetROIdata(image,depthImg,ROIimage1,ROIdepth1,ROIimagePoint1);
			imageCount++;
		}
		else if(1 == imageCount)
		{
			GetROIdata(image,depthImg,ROIimage2,ROIdepth2,ROIimagePoint2);
			imageCount = 0;
			
			ShowROIimage(ROIimage1,ROIimage2,
				  ROIimagePoint1,ROIimagePoint2);
			/***********Clear all vectors*******************/
			ROIimage1.erase(ROIimage1.begin(),ROIimage1.end());
			ROIimage2.erase(ROIimage2.begin(),ROIimage2.end());
			ROIdepth1.erase(ROIdepth1.begin(),ROIdepth1.end());
			ROIdepth2.erase(ROIdepth2.begin(),ROIdepth2.end());
			ROIimagePoint1.erase(ROIimagePoint1.begin(),ROIimagePoint1.end());
			ROIimagePoint2.erase(ROIimagePoint2.begin(),ROIimagePoint2.end());
		}
		else
			imageCount++;
		cvWaitKey(20);
    }//end while 
	m_context.StopGeneratingAll();  
    m_context.Shutdown(); 
    cvReleaseImage(&depthImg); 
	cvReleaseImage(&image); 

	return 0;
}


The output of the code above differs a little from the screenshot I posted: this is a modified version with the edge-detection and display parts removed, and what it finally shows are the two RGB images captured at different moments. Objects between 1 m and 1.5 m from the sensor are displayed.
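Note that GetROIdata() stores each depth sample as a low/high byte pair (DEPTHPIXEL). If you later need the depth back in millimetres, say to turn the ROI into a point cloud, the pair can be recombined as in the small helper below; the name DepthToMM is mine and is not part of the original code.

inline unsigned short DepthToMM(const DEPTHPIXEL& p)
{
	//Recombine the two bytes exactly as the range test in GetROIdata() does
	return (unsigned short)(p.data_L | (p.data_H << 8));
}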

 

I won't walk through the code; it is fairly basic, so have a look yourself. It should be a useful reference for anyone just getting started with the Kinect.
