A Python Implementation of Bagging

This version of Bagging implements sklearn's basic functionality and extends the choice of base models: in sklearn the base estimator must be a single type, whereas this version accepts multiple types (for example, with 300 classifiers you could use 100 decision trees, 100 SVMs, and 100 KNNs as the base models).
In addition, the sampling method used to build each model's training set can be defined inside the framework, instead of being limited to sklearn's hard-coded sampling with replacement.

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on 2017-08-28 

@author: panda_zjd
"""
import numpy as np
import pandas as pd
from collections import defaultdict 
import random
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score
from sklearn.ensemble import IsolationForest

class Bagging(object):

    def __init__(self, n_estimators, estimator, rate=1.0):
        self.estimator = estimator          # a single base estimator, or a list of estimators
        self.n_estimators = n_estimators    # total number of base models to train
        self.rate = rate                    # sampling rate used when building each training set

    def Voting(self, data):                 # majority voting over the base models' predictions
        term = np.transpose(data)           # transpose: each row now holds all models' votes for one sample

        def Vote(df):                       # vote over one sample's predictions
            store = defaultdict()
            for kw in df:
                store.setdefault(kw, 0)
                store[kw] += 1
            return max(store, key=store.get)

        result = map(Vote, term)            # apply the vote to every sample (Python 2: map returns a list)
        return result

    # random undersampling (without replacement)
    def UnderSampling(self, data, number=None):    # `number` is unused; kept so the signature matches the other samplers
        #np.random.seed(np.random.randint(0,1000))
        data = np.array(data)
        np.random.shuffle(data)             # shuffle the data in place
        newdata = data[0:int(data.shape[0]*self.rate), :]   # slice: keep rate * n samples, drop the remaining (1 - rate) fraction
        return newdata

    def TrainPredict(self, train, test):    # fit one base model and return its predictions on the test set
        clf = self.estimator.fit(train[:, 0:-1], train[:, -1])
        result = clf.predict(test[:, 0:-1])
        return result

    # simple sampling with replacement (bootstrap)
    def RepetitionRandomSampling(self, data, number):   # `number` is the number of rows to draw from
        sample = []
        for i in range(int(self.rate*number)):
            sample.append(data[random.randint(0, len(data)-1)])
        return sample

    def Metrics(self, predict_data, test):  # evaluation: per-class recall and precision against the test labels
        score = predict_data
        recall = recall_score(test[:, -1], score, average=None)        # recall per class
        precision = precision_score(test[:, -1], score, average=None)  # precision per class
        return recall, precision
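
    # NOTE: the original post references self.IF_SubSample in MutModel_clf but its
    # definition is not included in the snippet. The method below is only a hedged
    # sketch of what it might look like, inferred from the IsolationForest import and
    # the printed sampling rate (1.0 - self.rate): it fits an IsolationForest on the
    # features, drops the `rate` fraction of rows scored as most anomalous, and keeps
    # the remaining (1 - rate) fraction as the training subsample.
    def IF_SubSample(self, data, number=None):
        data = np.array(data)
        iforest = IsolationForest()
        iforest.fit(data[:, 0:-1])
        scores = iforest.decision_function(data[:, 0:-1])    # higher score = more "normal"
        keep = int(len(data) * (1.0 - self.rate))             # number of rows to keep
        idx = np.argsort(scores)[::-1][:keep]                 # indices of the most normal rows
        return data[idx]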


    def MutModel_clf(self, train, test, sample_type="RepetitionRandomSampling"):
        print "Bagging with multiple base models"
        result = list()
        num_estimators = len(self.estimator)    # number of base model types in use

        if sample_type == "RepetitionRandomSampling":
            print "Selected sampling method:", sample_type
            sample_function = self.RepetitionRandomSampling
        elif sample_type == "UnderSampling":
            print "Selected sampling method:", sample_type
            sample_function = self.UnderSampling
            print "Sampling rate:", self.rate
        elif sample_type == "IF_SubSample":
            print "Selected sampling method:", sample_type
            sample_function = self.IF_SubSample
            print "Sampling rate:", (1.0 - self.rate)
        else:
            raise ValueError("Unknown sample_type: %s" % sample_type)

        for estimator in self.estimator:
            print estimator
            for i in range(int(self.n_estimators/num_estimators)):
                sample = np.array(sample_function(train, len(train)))   # build this model's training set
                clf = estimator.fit(sample[:, 0:-1], sample[:, -1])     # train the base model
                result.append(clf.predict(test[:, 0:-1]))               # collect its predictions on the test set

        score = self.Voting(result)
        recall, precision = self.Metrics(score, test)
        return recall, precision
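
The usage sketch below is not from the original post; it is a minimal example of how the class could be driven, assuming a numeric dataset whose last column is the label (as the class expects). The dataset built with make_classification, the base_models list, and the parameter values (30 estimators, rate=0.8, test_size=0.3) are illustrative choices; the estimator types mirror the decision tree / SVM / KNN example from the introduction.

if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.svm import SVC
    from sklearn.neighbors import KNeighborsClassifier

    # toy dataset: features in the first columns, label in the last column
    X, y = make_classification(n_samples=500, n_features=10, random_state=0)
    data = np.column_stack((X, y))
    train, test = train_test_split(data, test_size=0.3, random_state=0)

    # 30 base models in total, split evenly across the three estimator types
    base_models = [DecisionTreeClassifier(), SVC(), KNeighborsClassifier()]
    bagging = Bagging(n_estimators=30, estimator=base_models, rate=0.8)

    recall, precision = bagging.MutModel_clf(np.array(train), np.array(test),
                                             sample_type="RepetitionRandomSampling")
    print "recall:", recall
    print "precision:", precision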
