一:基本原理
NCC是一種基於統計學計算兩組樣本數據相關性的算法,其取值範圍爲[-1, 1]之間,而對圖像來說,每個像素點都可以看作是RGB數值,這樣整幅圖像就可以看成是一個樣本數據的集合,如果它有一個子集與另外一個樣本數據相互匹配則它的NCC值爲1,表示相關性很高,如果是-1則表示完全不相關,基於這個原理,實現圖像基於模板匹配識別算法。
圖像匹配指在已知目標基準圖的子圖集合中,尋找與實時圖像最相似的子圖,以達到目標識別與定位目的的圖像技術。主要方法有:基於圖像灰度相關方法、基於圖像特徵方法、基於神經網絡相關的人工智能方法(還在完善中)。
基於圖像灰度的匹配算法簡單,匹配準確度高,主要用空間域的一維或二維滑動模版進行圖像匹配,不同的算法區別主要體現在模版及相關準則的選擇方面,但計算量大,不利於實時處理,對灰度變化、旋轉、形變以及遮擋等比較敏感;
基於圖像特徵的方法計算量相對較小,對灰度變化、形變及遮擋有較好的適應性,通過在原始圖中提取點、線、區域等顯著特徵作爲匹配基元,進而用於特徵匹配,但是匹配精度不高。
通常又把基於灰度的匹配算法,稱作相關匹配算法。相關匹配算法又分爲兩類:一類強調景物之間的差別程度如平方差法(SD)和平均絕對差值法(MAD)等;另一類強調景物之間的相似程度,主要算法又分成兩類,一是積相關匹配法,二是相關係數法。
今天我們就來說說歸一化互相關係數法(NCC)。
設 $\bar{g}$ 爲模板 $g$ 的灰度均值,$\bar{S}_{x,y}$ 爲子圖 $S_{x,y}$ 的灰度均值,則歸一化互相關係數定義爲:
$$R(x,y)=\frac{\sum_{i,j}\left[S_{x,y}(i,j)-\bar{S}_{x,y}\right]\left[g(i,j)-\bar{g}\right]}{\sqrt{\sum_{i,j}\left[S_{x,y}(i,j)-\bar{S}_{x,y}\right]^{2}}\;\sqrt{\sum_{i,j}\left[g(i,j)-\bar{g}\right]^{2}}}$$
相關係數滿足:
在[-1,1]絕對尺度範圍之間衡量兩者的相似性。相關係數刻畫了兩者之間的近似程度的線性描述。一般說來,越接近於1,兩者越近似的有線性關係。
2. C++代碼實現
(1) 獲取模板像素並計算均值與標準方差、像素與均值diff數據樣本
(2) 根據模板大小,在目標圖像上從左到右、從上到下移動窗口,計算每移動一個像素之後窗口內像素與模板像素的NCC值,與閾值比較,大於閾值則記錄位置
(3) 根據得到位置信息,使用紅色矩形標記出模板匹配識別結果。
(4) UI顯示結果
int main() {
Mat image2 = imread("C:\\Users\\Administrator\\Desktop\\t2\\temp.png", IMREAD_GRAYSCALE);
Mat image1 = imread("C:\\Users\\Administrator\\Desktop\\t2\\temp.png", IMREAD_GRAYSCALE);
int overlap = image2.cols;
float pearsonCorrelationCoefficientMax = 0;
int overlapMaxCorrelationCoefficient = 0;
//for (int overlap = 350; overlap < image2.cols; overlap += 10)
{
//****************************************//
Mat imageTemp = image2;//(Rect(0, 0, overlap, image1.rows));
long double tempTotalcount = 0;
long double tempTotalPixel = 0;
for (int i = 0; i < overlap; i++)
{
for (int j = 0; j < image1.rows; j++)
{
tempTotalcount += 1;
//cout << i<<","<<j<<":"<<int(imageTemp.at<uchar>(j,i)) << ",";
tempTotalPixel += float(imageTemp.at<uchar>(j, i));
}
cout << endl;
}
float tempAvg = tempTotalPixel / tempTotalcount;//灰度平均值
//**************************************//
long double tempSubstract = 0;
for (int i = 0; i < overlap; i++)
{
for (int j = 0; j < image1.rows; j++)
{
long double tempSquare = (long double(imageTemp.at<uchar>(j, i)) - tempAvg)* (long double(imageTemp.at<uchar>(j, i)) - tempAvg);
tempSubstract = tempSubstract + tempSquare;
}
cout << endl;
}
float tempVariance = sqrt(tempSubstract / tempTotalcount);//灰度標準差
//***********************************************//
Mat imageBase = image1(Rect(image1.cols - overlap, 0, overlap, image1.rows));
int baseTotalcount = 0;
int baseTotalPixel = 0;
for (int i = 0; i < overlap; i++)
{
for (int j = 0; j < image1.rows; j++)
{
baseTotalcount += 1;
//cout << i<<","<<j<<":"<<int(imageTemp.at<uchar>(j,i)) << ",";
baseTotalPixel += float(imageBase.at<uchar>(j, i));
}
cout << endl;
}
float baseAvg = baseTotalPixel / baseTotalcount;
//*****************************************//
long double baseSubstract = 0;
for (int i = 0; i < overlap; i++)
{
for (int j = 0; j < image1.rows; j++)
{
long double baseSquare = (long double(imageBase.at<uchar>(j, i)) - baseAvg)* (long double(imageBase.at<uchar>(j, i)) - baseAvg);
baseSubstract = baseSubstract + baseSquare;
}
cout << endl;
}
float baseVariance = sqrt(baseSubstract / baseTotalcount);
//***************************************//
long double dotMul = 0;
for (int i = 0; i < overlap; i++)
{
for (int j = 0; j < image1.rows; j++)
{
dotMul += abs((long double(imageBase.at<uchar>(j, i)) - baseAvg)*(long double(imageTemp.at<uchar>(j, i)) - tempAvg));
}
cout << endl;
}
float dotMulAvg = dotMul / (baseTotalcount);
float pearsonCorrelationCoefficient = dotMulAvg / (baseVariance*tempVariance);
if (pearsonCorrelationCoefficientMax < pearsonCorrelationCoefficient)
{
pearsonCorrelationCoefficientMax = pearsonCorrelationCoefficient;
overlapMaxCorrelationCoefficient = overlap;
}
}
cout << "最大相關係數" << pearsonCorrelationCoefficientMax << endl;
cout << "最大相關係數時重疊區域" << overlapMaxCorrelationCoefficient << endl;
imshow("", image1);
waitKey(0);
}