使用sklearn實現垃圾短信識別

"""Spam SMS classification: jieba word segmentation + TF-IDF features + Multinomial Naive Bayes."""
import pandas as pd
import jieba

# Read tab-separated labeled data; columns: label (0 = normal, 1 = spam) and raw message text.
data = pd.read_csv(r"E:\UCAS\labled.txt", sep='\t', names=['label', 'text'])
# print(data.head())

# Segment each Chinese message and join tokens with spaces so CountVectorizer
# can split on whitespace (it cannot tokenize unsegmented Chinese itself).
data['cut_message'] = data["text"].apply(lambda x: ' '.join(jieba.cut(x)))
# print(data.head())
x = data['cut_message'].values
y = data['label'].values

# FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.1)  # 90% train / 10% test

from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
# Fit the vocabulary and term counts on the TRAINING data only.
vectorizer = CountVectorizer()
x_train_termcounts = vectorizer.fit_transform(train_x)

# Re-weight raw counts into TF-IDF features.
tfidf_transformer = TfidfTransformer()
x_train_tfidf = tfidf_transformer.fit_transform(x_train_termcounts)

# Multinomial NB is the appropriate NB variant for discrete term-frequency/TF-IDF
# features (removed the unused GaussianNB import).
from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB().fit(x_train_tfidf, train_y)

# Apply the SAME fitted vectorizer/transformer to the test set
# (transform, not fit_transform, to avoid leaking test vocabulary).
x_input_termcounts = vectorizer.transform(test_x)
x_input_tfidf = tfidf_transformer.transform(x_input_termcounts)

predicted_categories = classifier.predict(x_input_tfidf)

from sklearn.metrics import accuracy_score
# FIX: the score was computed but its return value was silently discarded;
# print it so the result is visible when the file runs as a script.
print('accuracy:', accuracy_score(test_y, predicted_categories))

# Show a few test examples with predicted vs. true labels.
category_map = {
    0: 'normal',
    1: 'spam'
}
for sentence, category, real in zip(test_x[:10], predicted_categories[:10], test_y[:10]):
    print('\nmessage_content:', sentence, '\npredicted_type:', category_map[category], 'real_values:', category_map[real])

代碼來自"州的先生"

使用pandas處理數據,使用jieba進行中文分詞,之後調用sklearn包中的tfidf函數,貝葉斯函數,交叉驗證函數,準確率計算函數來進行垃圾短信的識別,最後輸出幾個實例展示。(sklearn包裏的函數使用還不太熟練,jieba還有pandas工具包以後要多加練習)

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章