最近在學習OpenCV,一邊看官方文檔一邊看書,發現自己原來用的很多接口早已被更新,分享一下學習心得體會,也希望大家可以不吝賜教!
首先看到在Mastering OpenCV with Practical Computer Vision Projects書中,特徵點檢測,特徵點描述(特徵提取),特徵點匹配用了以下代碼:
- cv::Ptr<cv::FeatureDetector> detector = new cv::ORB(1000); // 創建orb特徵點檢測
- cv::Ptr<cv::DescriptorExtractor> extractor = new cv::FREAK(true, true); // 用Freak特徵來描述特徵點
- cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BFMatcher(cv::NORM_HAMMING, true); // 特徵匹配,計算Hamming距離,開啓交叉驗證
- class CV_EXPORTS_W ORB : public Feature2D
- class CV_EXPORTS_W Feature2D : public FeatureDetector, public DescriptorExtractor
原來ORB,Feature2D,FeatureDetecter以及DescriptorExtractor之間是這樣的繼承關係,所以我們可以new一個ORB對象給FeatureDetecter指針了。再搜索一下文檔和頭文件,還有更多的檢測方法,特徵描述可以使用。其中ORB既可以作爲檢測器,也可以作特徵提取。
可以作爲檢測器的還有BRISK,MSER(特徵區域),FastFeatureDetector(即FAST角點檢測,ORB的檢測階段正是基於FAST),StarDetector等等。
可以作特徵提取的描述器包括BriefDescriptorExtractor(即BRIEF描述子,ORB的描述子正是基於BRIEF),Freak,OpponentColorDescriptorExtractor等等。
原來OpenCV提供了好多現成的方法,好方便……感嘆一下,以前辛辛苦苦碼的代碼很多就浪費了,用好工具還是很重要那!>_< 順帶一提的是,2.4.5中新增的CLAHE(限制對比度自適應直方圖均衡化)方法也是類似的調用方式:
- cv::Ptr<cv::CLAHE> cl = createCLAHE(80, Size(4, 4));
- cl->apply(imSrc, ImDst);
CV自帶的特徵匹配和特徵匹配結果繪製函數簡直易用的令人髮指……請看:
- vector<DMatch> matches;
- matcher->match(descriptors1, descriptors2, matches);
- Mat imResultOri;
- drawMatches(img1, keypoints1, img2, keypoints2, matches, imResultOri, CV_RGB(0,255,0), CV_RGB(0,255,0));
當然啦,做完特徵點匹配,我們還可以通過RANSAC方法計算透視變換矩陣來篩選符合相同透視的特徵點,這樣做可以去除很多錯誤的匹配。
- std::vector<unsigned char> inliersMask(srcPoints.size());
- homography = cv::findHomography(srcPoints, dstPoints, CV_RANSAC, reprojectionThreshold, inliersMask);
到此,一個簡單的匹配任務就算是完成啦。
看完這部分內容最大的心得體會就是,作爲一個寫工程代碼的人來說,要好好的去學習和掌握工具,可以避免好多沒有意義的重複勞動。
跑了一個例子:
運行結果:
特徵匹配:
一致的透視變換:
光流:
以上使用的代碼是Mastering OpenCV with Practical Computer Vision Projects書上的源碼經整理以後的代碼,順便嘗試了一下光流算法的調用。通過對這段源碼的學習,基本能夠掌握OpenCV2.4版本以後檢測,特徵提取與匹配方法。如果調用遇到困難,還是可以直接查看源碼來的更快捷。
- #include <iostream>
- #include <fstream>
- #include <sstream>
- #include "opencv2/opencv.hpp"
- using namespace cv;
- using namespace std;
- void KeyPointsToPoints(vector<KeyPoint> kpts, vector<Point2f> &pts);
- bool refineMatchesWithHomography(
- const std::vector<cv::KeyPoint>& queryKeypoints,
- const std::vector<cv::KeyPoint>& trainKeypoints,
- float reprojectionThreshold, std::vector<cv::DMatch>& matches,
- cv::Mat& homography);
- /** @function main */
- int main(int argc, char* argv[]) {
- /************************************************************************/
- /* 特徵點檢測,特徵提取,特徵匹配,計算投影變換 */
- /************************************************************************/
- // 讀取圖片
- Mat img1Ori = imread("1.jpg", 0);
- Mat img2Ori = imread("2.jpg", 0);
- // 縮小尺度
- Mat img1, img2;
- resize(img1Ori, img1, Size(img1Ori.cols / 4, img1Ori.cols / 4));
- resize(img2Ori, img2, Size(img2Ori.cols / 4, img2Ori.cols / 4));
- cv::Ptr<cv::FeatureDetector> detector = new cv::ORB(1000); // 創建orb特徵點檢測
- cv::Ptr<cv::DescriptorExtractor> extractor = new cv::FREAK(true, true); // 用Freak特徵來描述特徵點
- cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BFMatcher(cv::NORM_HAMMING, // 特徵匹配,計算Hamming距離
- true);
- vector<KeyPoint> keypoints1; // 用於保存圖中的特徵點
- vector<KeyPoint> keypoints2;
- Mat descriptors1; // 用於保存圖中的特徵點的特徵描述
- Mat descriptors2;
- detector->detect(img1, keypoints1); // 檢測第一張圖中的特徵點
- detector->detect(img2, keypoints2);
- extractor->compute(img1, keypoints1, descriptors1); // 計算圖中特徵點位置的特徵描述
- extractor->compute(img2, keypoints2, descriptors2);
- vector<DMatch> matches;
- matcher->match(descriptors1, descriptors2, matches);
- Mat imResultOri;
- drawMatches(img1, keypoints1, img2, keypoints2, matches, imResultOri,
- CV_RGB(0,255,0), CV_RGB(0,255,0));
- cout << "[Info] # of matches : " << matches.size() << endl;
- Mat matHomo;
- refineMatchesWithHomography(keypoints1, keypoints2, 3, matches, matHomo);
- cout << "[Info] Homography T : " << matHomo << endl;
- cout << "[Info] # of matches : " << matches.size() << endl;
- Mat imResult;
- drawMatches(img1, keypoints1, img2, keypoints2, matches, imResult,
- CV_RGB(0,255,0), CV_RGB(0,255,0));
- // 計算光流
- vector<uchar> vstatus;
- vector<float> verrs;
- vector<Point2f> points1;
- vector<Point2f> points2;
- KeyPointsToPoints(keypoints1, points1);
- calcOpticalFlowPyrLK(img1, img2, points1, points2, vstatus, verrs);
- Mat imOFKL = img1.clone();
- for (int i = 0; i < vstatus.size(); i++) {
- if (vstatus[i] && verrs[i] < 15) {
- line(imOFKL, points1[i], points2[i], CV_RGB(255,255,255), 1, 8, 0);
- circle(imOFKL, points2[i], 3, CV_RGB(255,255,255), 1, 8, 0);
- }
- }
- imwrite("opt.jpg", imOFKL);
- imwrite("re1.jpg", imResultOri);
- imwrite("re2.jpg", imResult);
- imshow("Optical Flow", imOFKL);
- imshow("origin matches", imResultOri);
- imshow("refined matches", imResult);
- waitKey();
- return -1;
- }
- bool refineMatchesWithHomography(
- const std::vector<cv::KeyPoint>& queryKeypoints,
- const std::vector<cv::KeyPoint>& trainKeypoints,
- float reprojectionThreshold, std::vector<cv::DMatch>& matches,
- cv::Mat& homography) {
- const int minNumberMatchesAllowed = 8;
- if (matches.size() < minNumberMatchesAllowed)
- return false;
- // Prepare data for cv::findHomography
- std::vector<cv::Point2f> srcPoints(matches.size());
- std::vector<cv::Point2f> dstPoints(matches.size());
- for (size_t i = 0; i < matches.size(); i++) {
- srcPoints[i] = trainKeypoints[matches[i].trainIdx].pt;
- dstPoints[i] = queryKeypoints[matches[i].queryIdx].pt;
- }
- // Find homography matrix and get inliers mask
- std::vector<unsigned char> inliersMask(srcPoints.size());
- homography = cv::findHomography(srcPoints, dstPoints, CV_FM_RANSAC,
- reprojectionThreshold, inliersMask);
- std::vector<cv::DMatch> inliers;
- for (size_t i = 0; i < inliersMask.size(); i++) {
- if (inliersMask[i])
- inliers.push_back(matches[i]);
- }
- matches.swap(inliers);
- return matches.size() > minNumberMatchesAllowed;
- }
- void KeyPointsToPoints(vector<KeyPoint> kpts, vector<Point2f> &pts) {
- for (int i = 0; i < kpts.size(); i++) {
- pts.push_back(kpts[i].pt);
- }
- return;
- }
原文地址:http://blog.csdn.net/u010141147/article/details/9464571