(Credit Risk Control XIII) Using a GBDT Model for an Application Scorecard: Python Implementation (Code)

The previous post introduced the theory behind using a GBDT model for scorecard modeling:

https://blog.csdn.net/LuYi_WeiLin/article/details/88314746

This post provides the Python code for applying a GBDT model to a scorecard model.

(There is also an earlier post that builds an application scorecard with logistic regression: https://blog.csdn.net/LuYi_WeiLin/article/details/85060190)

The dataset can be downloaded from my resources page; it is the same dataset used in the logistic regression scorecard post.

The code is as follows:

import pandas as pd
import time
import numpy as np
import re
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
from sklearn.model_selection import GridSearchCV, train_test_split
import matplotlib.pyplot as plt
import datetime
from dateutil.relativedelta import relativedelta
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression

'''
Date: 2019-03-11
Author: 小象學院
'''


def CareerYear(x):
    # Convert the employment-length strings to integers
    if str(x).find('nan') > -1:
        return -1
    elif str(x).find("10+") > -1:   # map "10+ years" to 11
        return 11
    elif str(x).find('< 1') > -1:   # map "< 1 year" to 0
        return 0
    else:
        return int(re.sub(r"\D", "", x))   # otherwise strip "years" and convert to an integer


def DescExisting(x):
    # Collapse the desc variable into two states: record present vs. record missing
    if type(x).__name__ == 'float':
        return 'no desc'
    else:
        return 'desc'


def ConvertDateStr(x):
    # Parse month-year strings like 'Dec-11' into datetimes; missing values get a far-future sentinel
    mth_dict = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10,
                'Nov': 11, 'Dec': 12}
    if str(x) == 'nan':
        return datetime.datetime.fromtimestamp(time.mktime(time.strptime('9900-1','%Y-%m')))
        # time.mktime cannot handle dates before 1970, so missing dates are mapped to the sentinel 9900-01
    else:
        yr = int(x[4:6])
        if yr <=17:
            yr = 2000+yr
        else:
            yr = 1900 + yr
        mth = mth_dict[x[:3]]
        return datetime.datetime(yr,mth,1)


def MonthGap(earlyDate, lateDate):
    # Whole months between two dates; 0 if lateDate is not after earlyDate
    if lateDate > earlyDate:
        gap = relativedelta(lateDate,earlyDate)
        yr = gap.years
        mth = gap.months
        return yr*12+mth
    else:
        return 0


def MakeupMissing(x):
    # Replace missing values with -1
    if np.isnan(x):
        return -1
    else:
        return x
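
# Quick sanity checks of the helpers above on illustrative values
# (the sample strings are hypothetical, not drawn from the dataset):
assert CareerYear('10+ years') == 11
assert CareerYear('< 1 year') == 0
assert CareerYear('3 years') == 3
assert ConvertDateStr('Dec-11') == datetime.datetime(2011, 12, 1)
assert MonthGap(datetime.datetime(2005, 1, 1), datetime.datetime(2011, 12, 1)) == 83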



'''
Step 1: Data preparation
'''
folderOfData = 'H:/'
allData = pd.read_csv(folderOfData + '數據集.csv',header = 0, encoding = 'latin1',engine ='python')
allData['term'] = allData['term'].apply(lambda x: int(x.replace(' months','')))
# Build the label: Fully Paid means a good customer; Charged Off means a defaulted customer
allData['y'] = allData['loan_status'].map(lambda x: int(x == 'Charged Off'))

'''
Loans come with different terms (term). An application scorecard must estimate the default probability
over a single, not-too-long horizon, so we keep only the samples with term = 36 months.
'''
allData1 = allData.loc[allData.term == 36]
trainData, testData = train_test_split(allData1, test_size=0.4)   # no random_state is set, so the split differs between runs



'''
Step 2: Data preprocessing
'''
# Convert the percentage strings (with a trailing %) into floats
trainData['int_rate_clean'] = trainData['int_rate'].map(lambda x: float(x.replace('%',''))/100)
# Convert employment length to integers, otherwise sorting is affected
trainData['emp_length_clean'] = trainData['emp_length'].map(CareerYear)
# Treat a missing desc as one state and a non-missing desc as another
trainData['desc_clean'] = trainData['desc'].map(DescExisting)
# Handle dates. earliest_cr_line comes in inconsistent formats; unify them and convert to Python datetimes
trainData['app_date_clean'] = trainData['issue_d'].map(lambda x: ConvertDateStr(x))
trainData['earliest_cr_line_clean'] = trainData['earliest_cr_line'].map(lambda x: ConvertDateStr(x))
# Handle mths_since_last_delinq. Note the raw values include 0, so -1 marks missing
trainData['mths_since_last_delinq_clean'] = trainData['mths_since_last_delinq'].map(lambda x:MakeupMissing(x))
trainData['mths_since_last_record_clean'] = trainData['mths_since_last_record'].map(lambda x:MakeupMissing(x))
trainData['pub_rec_bankruptcies_clean'] = trainData['pub_rec_bankruptcies'].map(lambda x:MakeupMissing(x))

'''
Step 3: Feature derivation
'''
# Derive the ratio of the requested loan amount to annual income
trainData['limit_income'] = trainData.apply(lambda x: x.loan_amnt / x.annual_inc, axis = 1)
# Derive the span from earliest_cr_line to the application date, in months
trainData['earliest_cr_to_app'] = trainData.apply(lambda x: MonthGap(x.earliest_cr_line_clean,x.app_date_clean), axis = 1)


'''
Categorical variables must be one-hot encoded before training the GBDT model
'''
num_features = ['int_rate_clean','emp_length_clean','annual_inc', 'dti', 'delinq_2yrs', 'earliest_cr_to_app','inq_last_6mths', \
                'mths_since_last_record_clean', 'mths_since_last_delinq_clean','open_acc','pub_rec','total_acc','limit_income']
cat_features = ['home_ownership', 'verification_status','desc_clean', 'purpose', 'zip_code','addr_state','pub_rec_bankruptcies_clean']

# One-hot encode the categorical features
v = DictVectorizer(sparse=False)
X1 = v.fit_transform(trainData[cat_features].to_dict('records'))
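
# Toy illustration of what DictVectorizer does (hypothetical records, separate
# from the model above): each distinct string value becomes its own 0/1 column.
demo_v = DictVectorizer(sparse=False)
demo_X = demo_v.fit_transform([{'home_ownership': 'RENT'}, {'home_ownership': 'OWN'}])
# demo_v.feature_names_ -> ['home_ownership=OWN', 'home_ownership=RENT']
# demo_X                -> [[0., 1.], [1., 0.]]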
# Combine the one-hot features with the numeric features for model training
X2 = np.array(trainData[num_features])
X = np.hstack([X1,X2])
y = trainData['y']

# Train a GBDT model with default hyperparameters (no tuning yet)
gbm0 = GradientBoostingClassifier(random_state=10)
gbm0.fit(X,y)



y_pred = gbm0.predict(X)
y_predprob = gbm0.predict_proba(X)[:,1]
print("Accuracy : %.4g" % metrics.accuracy_score(y, y_pred))
print("AUC Score (Train): %f" % metrics.roc_auc_score(np.array(y.T), y_predprob))



'''
Step 4: Evaluate the model on the test set
'''
# Convert the percentage strings (with a trailing %) into floats
testData['int_rate_clean'] = testData['int_rate'].map(lambda x: float(x.replace('%',''))/100)
# Convert employment length to integers, otherwise sorting is affected
testData['emp_length_clean'] = testData['emp_length'].map(CareerYear)
# Treat a missing desc as one state and a non-missing desc as another
testData['desc_clean'] = testData['desc'].map(DescExisting)
# Handle dates. earliest_cr_line comes in inconsistent formats; unify them and convert to Python datetimes
testData['app_date_clean'] = testData['issue_d'].map(lambda x: ConvertDateStr(x))
testData['earliest_cr_line_clean'] = testData['earliest_cr_line'].map(lambda x: ConvertDateStr(x))
# Handle mths_since_last_delinq. Note the raw values include 0, so -1 marks missing
testData['mths_since_last_delinq_clean'] = testData['mths_since_last_delinq'].map(lambda x:MakeupMissing(x))
testData['mths_since_last_record_clean'] = testData['mths_since_last_record'].map(lambda x:MakeupMissing(x))
testData['pub_rec_bankruptcies_clean'] = testData['pub_rec_bankruptcies'].map(lambda x:MakeupMissing(x))


# Derive the ratio of the requested loan amount to annual income
testData['limit_income'] = testData.apply(lambda x: x.loan_amnt / x.annual_inc, axis = 1)
# Derive the span from earliest_cr_line to the application date, in months
testData['earliest_cr_to_app'] = testData.apply(lambda x: MonthGap(x.earliest_cr_line_clean,x.app_date_clean), axis = 1)

# Encode the test set with the one-hot encoder fitted on the training set
X1_test = v.transform(testData[cat_features].to_dict('records'))
X2_test = np.array(testData[num_features])
X_test = np.hstack([X1_test,X2_test])
y_test = testData['y']


### Compute the KS statistic
def KS(df, score, target):
    '''
    :param df: DataFrame containing the target variable and the predictions
    :param score: column name of the score or probability, str
    :param target: column name of the target variable, str
    :return: the KS statistic
    '''
    total = df.groupby([score])[target].count()
    bad = df.groupby([score])[target].sum()
    regroup = pd.DataFrame({'total': total, 'bad': bad})
    regroup['good'] = regroup['total'] - regroup['bad']
    regroup[score] = regroup.index
    regroup = regroup.sort_values(by=score, ascending=False)
    regroup.index = range(len(regroup))
    regroup['badCumRate'] = regroup['bad'].cumsum() / regroup['bad'].sum()
    regroup['goodCumRate'] = regroup['good'].cumsum() / regroup['good'].sum()
    # KS is the maximum gap between the cumulative bad rate and the cumulative good rate
    return max(regroup['badCumRate'] - regroup['goodCumRate'])
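
# Minimal sanity check of KS on hypothetical toy data: a score that separates
# the two classes perfectly should give KS = 1.
toy = pd.DataFrame({'p': [0.9, 0.8, 0.2, 0.1], 'label': [1, 1, 0, 0]})
assert KS(toy, 'p', 'label') == 1.0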


# Evaluate the untuned GBDT on the test set
y_pred = gbm0.predict(X_test)
y_predprob = gbm0.predict_proba(X_test)[:,1]
testData['predprob'] = list(y_predprob)
print("Accuracy : %.4g" % metrics.accuracy_score(y_test, y_pred))
print("AUC Score (Test): %f" % metrics.roc_auc_score(np.array(y_test)[:,0], y_predprob))
print("KS is :%f" % KS(testData, 'predprob', 'y'))


'''
GBDT hyperparameter tuning
'''
# 1. Fix a small learning rate, then tune the number of boosting iterations (n_estimators)

X = pd.DataFrame(X)

# The search ranges below each contain a single value (presumably to keep the demo fast);
# widen them for a real search, e.g. range(20, 81, 10).
param_test1 = {'n_estimators': range(80, 81, 10)}
gsearch1 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, min_samples_split=30, min_samples_leaf=5, max_depth=8,
                                                               max_features='sqrt', subsample=0.8, random_state=10),
                        param_grid = param_test1, scoring='roc_auc', cv=5)
gsearch1.fit(X,y)
print(gsearch1.best_params_, gsearch1.best_score_)
best_n_estimator = gsearch1.best_params_['n_estimators']


# 2. Grid-search the maximum tree depth (max_depth) and the minimum number of samples required to split an internal node (min_samples_split)
param_test2 = {'max_depth': range(3, 4), 'min_samples_split': range(6, 7)}
gsearch2 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=best_n_estimator, min_samples_leaf=20,
                                                               max_features='sqrt', subsample=0.8, random_state=10),
                        param_grid = param_test2, scoring='roc_auc', cv=5)
gsearch2.fit(X,y)
print(gsearch2.best_params_, gsearch2.best_score_)
best_max_depth = gsearch2.best_params_['max_depth']

# 3. Jointly tune min_samples_split and the minimum number of samples per leaf (min_samples_leaf)
param_test3 = {'min_samples_split': range(80, 81, 10), 'min_samples_leaf': range(50, 51, 5)}
gsearch3 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=best_n_estimator, max_depth=best_max_depth,
                                                               max_features='sqrt', subsample=0.8, random_state=10),
                        param_grid = param_test3, scoring='roc_auc', cv=5)
gsearch3.fit(X,y)
print(gsearch3.best_params_, gsearch3.best_score_)
best_min_samples_split, best_min_samples_leaf = gsearch3.best_params_['min_samples_split'],gsearch3.best_params_['min_samples_leaf']

# 4. Grid-search the number of features considered at each split (max_features),
#    centred on the square root of the feature count (X.shape[1])
param_test4 = {'max_features': range(int(np.sqrt(X.shape[1])), int(np.sqrt(X.shape[1]))+1, 5)}
gsearch4 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=best_n_estimator, max_depth=best_max_depth,
                                                               min_samples_leaf=best_min_samples_leaf, min_samples_split=best_min_samples_split,
                                                               subsample=0.8, random_state=10),
                        param_grid = param_test4, scoring='roc_auc', cv=5)
gsearch4.fit(X,y)
print(gsearch4.best_params_, gsearch4.best_score_)
best_max_features = gsearch4.best_params_['max_features']

# 5. Grid-search the subsampling fraction (subsample)
param_test5 = {'subsample': [0.6 + i*0.05 for i in range(1)]}
gsearch5 = GridSearchCV(estimator = GradientBoostingClassifier(learning_rate=0.1, n_estimators=best_n_estimator, max_depth=best_max_depth,
                                                               min_samples_leaf=best_min_samples_leaf, max_features=best_max_features,
                                                               random_state=10),
                        param_grid = param_test5, scoring='roc_auc', cv=5)
gsearch5.fit(X,y)
print(gsearch5.best_params_, gsearch5.best_score_)
best_subsample = gsearch5.best_params_['subsample']


# Refit the final model with all tuned hyperparameters
gbm_best = GradientBoostingClassifier(learning_rate=0.1, n_estimators=best_n_estimator, max_depth=best_max_depth,
                                      min_samples_leaf=best_min_samples_leaf, max_features=best_max_features,
                                      subsample=best_subsample, random_state=10)
gbm_best.fit(X,y)


# Evaluate the tuned model on the test set
y_pred = gbm_best.predict(X_test)
y_predprob = gbm_best.predict_proba(X_test)[:,1]
testData['predprob'] = list(y_predprob)
# Accuracy
print("Accuracy : %.4g" % metrics.accuracy_score(y_test, y_pred))
print("AUC Score (Test): %f" % metrics.roc_auc_score(np.array(y_test)[:,0], y_predprob))
print("KS is :%f"%KS(testData, 'predprob', 'y'))


########### Convert probabilities to scores ########################
def Prob2Score(prob, basePoint, PDO):
    # Map a default probability to a positive integer score
    y = np.log(prob/(1-prob))
    return int(basePoint+PDO/np.log(2)*(-y))
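
# Worked example of the "points to double the odds" (PDO) scaling, using the
# basePoint = 250 and PDO = 50 values set below: a probability of 0.5 means
# log-odds 0 and maps to the base point, and every halving of the bad odds
# adds PDO points.
print(Prob2Score(0.5, 250, 50))    # 250: log-odds 0 maps to basePoint
print(Prob2Score(0.25, 250, 50))   # 329: lower default probability -> higher score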

basePoint = 250
PDO = 50
testData['score'] = testData['predprob'].map(lambda x:Prob2Score(x, basePoint, PDO))
testData = testData.sort_values(by = 'score')
# Plot the score distribution
plt.hist(testData['score'], 100)
plt.xlabel('score')
plt.ylabel('freq')
plt.title('distribution')
plt.show()