Meta-algorithms (ensemble methods): combinations of different classifiers. The combination can take several forms: an ensemble of different algorithms, an ensemble of the same algorithm under different settings, or an ensemble in which different parts of the dataset are assigned to different classifiers.
Bootstrap aggregating (bagging): sample from the original dataset S times to obtain S new datasets of the same size (sampling with replacement, so duplicates are allowed), train one classifier on each, and give every classifier equal weight.
Boosting: similar to bagging, but each new classifier is obtained by concentrating on the data that the existing classifiers misclassify; the final classification is a weighted sum of all classifiers' outputs.
Decision stump: a one-level decision tree that makes its decision based on a single feature.
Note: the main listing below is an ensemble built from different settings of a single classifier (decision stumps); combining several kinds of classifiers usually works better, and xgboost is recommended in practice (a minimal sketch follows this note, before the main listing).
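As a point of reference for that recommendation, here is a minimal sketch of fitting a boosted-tree classifier with xgboost's scikit-learn wrapper on the same toy points used below; the hyperparameter values are illustrative assumptions rather than tuned settings, and depth-1 trees are chosen as the analogue of decision stumps.

import numpy as np
import xgboost as xgb  # assumes xgboost is installed (pip install xgboost)

# Same toy points as loadSimpData(); the sklearn wrapper expects 0/1 labels rather than -1/+1.
X = np.array([[1.0, 2.1], [2.0, 1.1], [1.3, 1.0], [1.0, 1.0], [2.0, 1.0]])
y = np.array([1, 1, 0, 0, 1])

clf = xgb.XGBClassifier(n_estimators=40, max_depth=1, learning_rate=0.5)  # illustrative hyperparameters
clf.fit(X, y)
print(clf.predict(np.array([[5.0, 5.0], [0.0, 0.0]])))  # class predictions for two new points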
# encoding: utf-8
from numpy import *

def loadSimpData():
    datMat = matrix([[1.0, 2.1],
                     [2.0, 1.1],
                     [1.3, 1.0],
                     [1.0, 1.0],
                     [2.0, 1.0]])
    classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return datMat, classLabels
# Adaptive data loading: parses a tab-delimited file whose last column is the class label
def loadDataSet(fileName):
    numFeat = len(open(fileName).readline().split('\t'))  # number of fields per line
    dataMat = []; labelMat = []
    fr = open(fileName)
    for line in fr.readlines():
        lineArr = []
        curLine = line.strip().split('\t')
        for i in range(numFeat - 1):
            lineArr.append(float(curLine[i]))
        dataMat.append(lineArr)
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
# Decision stump generation
# Classify by comparing against a threshold: one side of the threshold is -1, the other is +1
def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):  # classify the data directly
    retArray = ones((shape(dataMatrix)[0], 1))  # initialize everything to +1
    if threshIneq == 'lt':
        retArray[dataMatrix[:, dimen] <= threshVal] = -1.0
    else:
        retArray[dataMatrix[:, dimen] > threshVal] = -1.0
    return retArray
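
# Illustrative helper, not part of the original listing: apply a hand-picked stump to the
# toy data, thresholding feature 0 at 1.5 with 'lt' (values <= 1.5 are labelled -1).
def _demoStumpClassify():
    datMat, classLabels = loadSimpData()
    print(stumpClassify(datMat, 0, 1.5, 'lt').T)  # should print [[-1.  1. -1. -1.  1.]]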
# Find the best decision stump
# Iterate over all possible inputs to stumpClassify() and keep the stump that best fits the data
# dataArr: data; classLabels: class labels; D: weight vector over the training examples
def buildStump(dataArr, classLabels, D):
    dataMatrix = mat(dataArr); labelMat = mat(classLabels).T
    m, n = shape(dataMatrix)
    numSteps = 10.0; bestStump = {}; bestClasEst = mat(zeros((m, 1)))
    minError = inf  # initialize the minimum error to +infinity
    for i in range(n):  # loop over every feature
        rangeMin = dataMatrix[:, i].min(); rangeMax = dataMatrix[:, i].max()
        stepSize = (rangeMax - rangeMin) / numSteps  # step size derived from the feature's min and max
        for j in range(-1, int(numSteps) + 1):  # loop over the candidate thresholds
            for inequal in ['lt', 'gt']:  # try both 'less than' and 'greater than'
                threshVal = (rangeMin + float(j) * stepSize)
                predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)  # predicted values
                errArr = mat(ones((m, 1)))  # error vector
                errArr[predictedVals == labelMat] = 0  # set errArr to 0 where prediction matches the label
                weightedError = D.T * errArr  # weighted error of the misclassified examples
                print("split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError))
                if weightedError < minError:  # if the current error beats the best so far, save this stump in bestStump
                    minError = weightedError
                    bestClasEst = predictedVals.copy()
                    bestStump['dim'] = i
                    bestStump['thresh'] = threshVal
                    bestStump['ineq'] = inequal
    return bestStump, minError, bestClasEst  # return the stump dict, its error rate, and its class estimates
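
# Illustrative helper, not part of the original listing: find the best stump for the toy
# data under uniform example weights D = 1/m.
def _demoBuildStump():
    datMat, classLabels = loadSimpData()
    D = mat(ones((5, 1)) / 5)
    bestStump, minError, bestClasEst = buildStump(datMat, classLabels, D)
    print(bestStump, minError)  # e.g. {'dim': 0, 'thresh': 1.3, 'ineq': 'lt'} with weighted error 0.2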
# AdaBoost training with decision stumps; numIt: number of weak classifiers (iterations)
def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    weakClassArr = []
    m = shape(dataArr)[0]
    D = mat(ones((m, 1)) / m)  # initialize D as a probability distribution (sums to 1)
    aggClassEst = mat(zeros((m, 1)))
    for i in range(numIt):
        bestStump, error, classEst = buildStump(dataArr, classLabels, D)  # build a stump with the lowest weighted error
        # print("D:", D.T)
        # alpha is this stump's weight in the overall classifier: alpha = 0.5 * ln((1 - error) / error);
        # max(error, 1e-16) guards against division by zero when error = 0
        alpha = float(0.5 * log((1.0 - error) / max(error, 1e-16)))
        bestStump['alpha'] = alpha  # add alpha to the bestStump dict
        weakClassArr.append(bestStump)  # store the stump's parameters in the array
        # print("classEst: ", classEst.T)
        # Weight-vector update: D_i <- D_i * exp(-alpha * y_i * h(x_i)), then normalize so D sums to 1
        expon = multiply(-1 * alpha * mat(classLabels).T, classEst)
        D = multiply(D, exp(expon))
        D = D / D.sum()
        # Compute the training error of the aggregate classifier; stop early if it reaches 0
        aggClassEst += alpha * classEst  # running class estimate (weighted vote)
        # print("aggClassEst: ", aggClassEst.T)
        aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T, ones((m, 1)))  # aggregate errors
        errorRate = aggErrors.sum() / m
        print("total error: ", errorRate)
        if errorRate == 0.0: break
    return weakClassArr, aggClassEst
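
# Illustrative helper, not part of the original listing: train AdaBoost on the toy data;
# with these five points the training error reaches 0 after three stumps.
def _demoTrain():
    datMat, classLabels = loadSimpData()
    weakClassArr, aggClassEst = adaBoostTrainDS(datMat, classLabels, numIt=9)
    print(weakClassArr)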
# The weighted sum of all classifiers' outputs gives the final result
# Classify with the collection of trained weak classifiers
# Inputs: datToClass - one or more examples to classify; classifierArr - the array of trained weak classifiers
def adaClassify(datToClass, classifierArr):
    dataMatrix = mat(datToClass)  # convert to a numpy matrix, mirroring the aggClassEst handling in adaBoostTrainDS
    m = shape(dataMatrix)[0]  # number of examples to classify
    aggClassEst = mat(zeros((m, 1)))  # column vector of aggregate estimates
    for i in range(len(classifierArr)):  # loop over the weak classifiers
        classEst = stumpClassify(dataMatrix, classifierArr[i]['dim'],
                                 classifierArr[i]['thresh'],
                                 classifierArr[i]['ineq'])  # call stump classify
        aggClassEst += classifierArr[i]['alpha'] * classEst  # accumulate the weighted class estimate
        print(aggClassEst)
    return sign(aggClassEst)  # return the sign of aggClassEst: +1 where it is > 0, -1 where it is < 0
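
# Illustrative helper, not part of the original listing: classify two new points with the
# trained ensemble; (5, 5) should come out as +1 and (0, 0) as -1.
def _demoClassify():
    datMat, classLabels = loadSimpData()
    weakClassArr, aggClassEst = adaBoostTrainDS(datMat, classLabels, numIt=30)
    print(adaClassify([[5, 5], [0, 0]], weakClassArr))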
# Inputs: predStrengths - the classifier's prediction strengths (e.g. aggClassEst); classLabels - class labels
def plotROC(predStrengths, classLabels):
    import matplotlib.pyplot as plt
    cur = (1.0, 1.0)  # cursor position, starting at the top-right corner
    ySum = 0.0  # running sum used to compute the AUC
    numPosClas = sum(array(classLabels) == 1.0)  # number of positive examples
    yStep = 1 / float(numPosClas); xStep = 1 / float(len(classLabels) - numPosClas)  # step sizes on the y and x axes
    sortedIndicies = predStrengths.argsort()  # indices sorted by prediction strength, ascending; the curve is traced from (1,1) back to (0,0)
    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    # loop through all the values, drawing a line segment at each point
    for index in sortedIndicies.tolist()[0]:
        if classLabels[index] == 1.0:
            delX = 0; delY = yStep
        else:
            delX = xStep; delY = 0
            ySum += cur[1]  # accumulate the curve height for the AUC calculation
        # draw a line segment from cur to (cur[0]-delX, cur[1]-delY)
        ax.plot([cur[0], cur[0] - delX], [cur[1], cur[1] - delY], c='b')
        cur = (cur[0] - delX, cur[1] - delY)
    ax.plot([0, 1], [0, 1], 'b--')
    plt.xlabel('False positive rate'); plt.ylabel('True positive rate')
    plt.title('ROC curve for AdaBoost horse colic detection system')
    ax.axis([0, 1, 0, 1])
    plt.show()
    print("the Area Under the Curve is: ", ySum * xStep)