Naive Bayes Algorithm: News Classification (scikit-learn Implementation)
Adapted from: https://blog.csdn.net/asialee_bird
1. Steps for implementing news classification with naive Bayes
(1) Obtain the text files, i.e., download the dataset.
(2) Prepare the data.
Split the dataset into a training set and a test set; use the jieba module for word segmentation, then count word frequencies, filter stop words, and extract text features to vectorize the text data (see the segmentation sketch after this list).
Download the stop-word file stopwords_cn.txt.
jieba module references: https://github.com/fxsjy/jieba ; https://www.oschina.net/p/jieba
(3) Analyze the data: visualize with the matplotlib module.
(4) Train the algorithm: train with MultinomialNB from sklearn.naive_bayes.
scikit-learn provides three naive Bayes classifier classes: GaussianNB, MultinomialNB, and BernoulliNB.
GaussianNB models the feature likelihood as a Gaussian distribution, MultinomialNB models it as a multinomial distribution (the usual choice for word counts), and BernoulliNB models it as a Bernoulli distribution over binary features; a short comparison sketch follows this list.
(5) Test the algorithm: evaluate the Bayes classifier on the test set.
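For step (2), a minimal segmentation sketch (assuming jieba is installed, e.g. via pip install jieba; the sample sentence is purely illustrative):

# -*- coding: UTF-8 -*-
import jieba

sentence = '自然語言處理是人工智能的一個重要方向'  # illustrative sample sentence
word_list = list(jieba.cut(sentence, cut_all=False))  # accurate mode, same as in the code below
print('/'.join(word_list))

cut_all=False selects accurate mode, which the preprocessing code below also uses; cut_all=True would select full mode, which lists every possible word instead.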
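And for step (4), a minimal sketch of how the three classifier classes differ (the toy matrix and labels below are made up purely for illustration): MultinomialNB expects count features, BernoulliNB binary features, and GaussianNB continuous features.

import numpy as np
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB

X = np.array([[2, 0, 1], [0, 3, 0], [1, 1, 0], [0, 0, 2]])  # toy word-count matrix
y = np.array(['sports', 'tech', 'sports', 'tech'])          # toy labels

print(MultinomialNB().fit(X, y).predict(X))                           # counts as-is
print(BernoulliNB().fit(X > 0, y).predict(X > 0))                     # binarized occurrence
print(GaussianNB().fit(X.astype(float), y).predict(X.astype(float)))  # treated as continuous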
2. Code implementation
# -*- coding: UTF-8 -*-
import os
import random
import jieba
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
"""
函數說明:中文文本處理
Parameters:
folder_path - 文本存放的路徑
test_size - 測試集佔比,默認佔所有數據集的百分之20
Returns:
all_words_list - 按詞頻降序排序的訓練集列表
train_data_list - 訓練集列表
test_data_list - 測試集列表
train_class_list - 訓練集標籤列表
test_class_list - 測試集標籤列表
"""
def TextProcessing(folder_path, test_size=0.2):
folder_list = os.listdir(folder_path) # 查看folder_path下的文件
data_list = [] # 數據集數據
class_list = [] # 數據集類別
# 遍歷每個子文件夾
for folder in folder_list:
new_folder_path = os.path.join(folder_path, folder) # 根據子文件夾,生成新的路徑
files = os.listdir(new_folder_path) # 存放子文件夾下的txt文件的列表
j = 1
# 遍歷每個txt文件
for file in files:
if j > 100: # 每類txt樣本數最多100個
break
with open(os.path.join(new_folder_path, file), 'r', encoding='utf-8') as f: # 打開txt文件
raw = f.read()
word_cut = jieba.cut(raw, cut_all=False) # 精簡模式,返回一個可迭代的generator
word_list = list(word_cut) # generator轉換爲list
data_list.append(word_list) # 添加數據集數據
class_list.append(folder) # 添加數據集類別
j += 1
data_class_list = list(zip(data_list, class_list)) # zip壓縮合並,將數據與標籤對應壓縮
random.shuffle(data_class_list) # 將data_class_list亂序
index = int(len(data_class_list) * test_size) + 1 # 訓練集和測試集切分的索引值
train_list = data_class_list[index:] # 訓練集
test_list = data_class_list[:index] # 測試集
train_data_list, train_class_list = zip(*train_list) # 訓練集解壓縮
test_data_list, test_class_list = zip(*test_list) # 測試集解壓縮
all_words_dict = {} # 統計訓練集詞頻
for word_list in train_data_list:
for word in word_list:
if word in all_words_dict.keys():
all_words_dict[word] += 1
else:
all_words_dict[word] = 1
# 根據鍵的值倒序排序
all_words_tuple_list = sorted(all_words_dict.items(), key=lambda f: f[1], reverse=True)
all_words_list, all_words_nums = zip(*all_words_tuple_list) # 解壓縮
all_words_list = list(all_words_list) # 轉換成列表
return all_words_list, train_data_list, test_data_list, train_class_list, test_class_list
"""
函數說明:讀取文件裏的內容,並去重
Parameters:
words_file - 文件路徑
Returns:
words_set - 讀取的內容的set集合
"""
def MakeWordsSet(words_file):
words_set = set() # 創建set集合
with open(words_file, 'r', encoding='utf-8') as f: # 打開文件
for line in f.readlines(): # 一行一行讀取
word = line.strip() # 去回車
if len(word) > 0: # 有文本,則添加到words_set中
words_set.add(word)
return words_set # 返回處理結果
"""
函數說明:文本特徵選取
Parameters:
all_words_list - 訓練集所有文本列表
deleteN - 刪除詞頻最高的deleteN個詞
stopwords_set - 指定的結束語
Returns:
feature_words - 特徵集
"""
def words_dict(all_words_list, deleteN, stopwords_set=set()):
feature_words = [] # 特徵列表
n = 1
for t in range(deleteN, len(all_words_list), 1):
if n > 1000: # feature_words的維度爲1000
break
# 如果這個詞不是數字,並且不是指定的結束語,並且單詞長度大於1小於5,那麼這個詞就可以作爲特徵詞
if not all_words_list[t].isdigit() and all_words_list[t] not in stopwords_set and 1 < len(all_words_list[t]) < 5:
feature_words.append(all_words_list[t])
n += 1
return feature_words
"""
函數說明:根據feature_words將文本向量化
Parameters:
train_data_list - 訓練集
test_data_list - 測試集
feature_words - 特徵集
Returns:
train_feature_list - 訓練集向量化列表
test_feature_list - 測試集向量化列表
"""
def TextFeatures(train_data_list, test_data_list, feature_words):
def text_features(text, feature_words): # 出現在特徵集中,則置1
text_words = set(text)
features = [1 if word in text_words else 0 for word in feature_words]
return features
train_feature_list = [text_features(text, feature_words) for text in train_data_list]
test_feature_list = [text_features(text, feature_words) for text in test_data_list]
return train_feature_list, test_feature_list # 返回結果
"""
函數說明:新聞分類器
Parameters:
train_feature_list - 訓練集向量化的特徵文本
test_feature_list - 測試集向量化的特徵文本
train_class_list - 訓練集分類標籤
test_class_list - 測試集分類標籤
Returns:
test_accuracy - 分類器精度
"""
def TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list):
classifier = MultinomialNB().fit(train_feature_list, train_class_list)
test_accuracy = classifier.score(test_feature_list, test_class_list)
return test_accuracy
if __name__ == '__main__':
    # Text preprocessing
    folder_path = './SogouC/Sample'  # location of the dataset
    all_words_list, train_data_list, test_data_list, train_class_list, test_class_list = \
        TextProcessing(folder_path, test_size=0.2)
    # Build stopwords_set
    stopwords_file = './stopwords_cn.txt'
    stopwords_set = MakeWordsSet(stopwords_file)
    test_accuracy_list = []
    """
    # Uncomment to plot test accuracy against deleteN (step (3) above)
    deleteNs = range(0, 1000, 20)  # 0 20 40 60 ... 980
    for deleteN in deleteNs:
        feature_words = words_dict(all_words_list, deleteN, stopwords_set)
        train_feature_list, test_feature_list = TextFeatures(train_data_list, test_data_list, feature_words)
        test_accuracy = TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list)
        test_accuracy_list.append(test_accuracy)
    plt.figure()
    plt.plot(deleteNs, test_accuracy_list)
    plt.title('Relationship of deleteNs and test_accuracy')
    plt.xlabel('deleteNs')
    plt.ylabel('test_accuracy')
    plt.show()
    """
    feature_words = words_dict(all_words_list, 450, stopwords_set)  # deleteN = 450, chosen after inspecting the plot above
    train_feature_list, test_feature_list = TextFeatures(train_data_list, test_data_list, feature_words)
    test_accuracy = TextClassifier(train_feature_list, test_feature_list, train_class_list, test_class_list)
    test_accuracy_list.append(test_accuracy)
    ave = lambda c: sum(c) / len(c)
    print(ave(test_accuracy_list))
Result: the script prints the average test accuracy; because the shuffle in TextProcessing is unseeded, the exact value varies from run to run.
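As a side note, the hand-rolled TextFeatures vectorization above could also be replaced by scikit-learn's CountVectorizer; a minimal sketch, reusing the variables from the script above:

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# The documents are already segmented into word lists, so join them with
# spaces and let CountVectorizer tokenize on whitespace-separated tokens.
train_docs = [' '.join(words) for words in train_data_list]
test_docs = [' '.join(words) for words in test_data_list]

vectorizer = CountVectorizer(max_features=1000)  # cap the vocabulary at 1000 features, as above
X_train = vectorizer.fit_transform(train_docs)   # fit the vocabulary on the training set only
X_test = vectorizer.transform(test_docs)

clf = MultinomialNB().fit(X_train, train_class_list)
print(clf.score(X_test, test_class_list))

CountVectorizer's default token pattern keeps only tokens of two or more characters, which roughly matches the length filter in words_dict; unlike the 0/1 features above, it produces raw counts, which is exactly what MultinomialNB models.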